struct OrtTensorRTProviderOptionsV2

Options for the TensorRT provider that are passed to SessionOptionsAppendExecutionProvider_TensorRT_V2.

Public Member Functions

OrtTensorRTProviderOptionsV2 & operator=(const OrtTensorRTProviderOptionsV2 &other)

Public Attributes

int has_user_compute_stream
void * user_compute_stream
int trt_max_partition_iterations
int trt_min_subgraph_size
size_t trt_max_workspace_size
const char * trt_int8_calibration_table_name
int trt_int8_use_native_calibration_table
int trt_engine_cache_enable
const char * trt_engine_cache_path
const char * trt_engine_cache_prefix
int trt_engine_decryption_enable
const char * trt_engine_decryption_lib_path
int trt_force_sequential_engine_build
int trt_context_memory_sharing_enable
int trt_layer_norm_fp32_fallback
int trt_timing_cache_enable
const char * trt_timing_cache_path
int trt_force_timing_cache
int trt_detailed_build_log
int trt_build_heuristics_enable
int trt_builder_optimization_level
int trt_auxiliary_streams
const char * trt_tactic_sources
const char * trt_extra_plugin_lib_paths
const char * trt_profile_min_shapes
const char * trt_profile_max_shapes
const char * trt_profile_opt_shapes
int trt_cuda_graph_enable
int trt_dump_ep_context_model
const char * trt_ep_context_file_path
int trt_ep_context_embed_mode
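The sketch below shows one common way these options are populated and attached to a session: rather than filling the struct by hand, the ONNX Runtime C API entry points CreateTensorRTProviderOptions, UpdateTensorRTProviderOptions, SessionOptionsAppendExecutionProvider_TensorRT_V2, and ReleaseTensorRTProviderOptions are used, with the attribute names listed above passed as string keys. The model path, cache path, and option values are illustrative placeholders, not prescribed settings.

#include <onnxruntime_cxx_api.h>

#include <vector>

int main() {
  const OrtApi& api = Ort::GetApi();

  // Create an options object owned by the runtime so that any field not
  // explicitly set keeps its default value.
  OrtTensorRTProviderOptionsV2* trt_options = nullptr;
  Ort::ThrowOnError(api.CreateTensorRTProviderOptions(&trt_options));

  // Keys correspond to the struct members documented above; values are
  // passed as strings and are placeholders here.
  std::vector<const char*> keys{"trt_max_workspace_size",
                                "trt_engine_cache_enable",
                                "trt_engine_cache_path",
                                "trt_builder_optimization_level"};
  std::vector<const char*> values{"2147483648", "1", "/tmp/trt_cache", "3"};
  Ort::ThrowOnError(api.UpdateTensorRTProviderOptions(
      trt_options, keys.data(), values.data(), keys.size()));

  // Attach the TensorRT execution provider to the session options,
  // then release the provider options object.
  Ort::SessionOptions session_options;
  Ort::ThrowOnError(api.SessionOptionsAppendExecutionProvider_TensorRT_V2(
      static_cast<OrtSessionOptions*>(session_options), trt_options));
  api.ReleaseTensorRTProviderOptions(trt_options);

  // "model.onnx" is a placeholder model path.
  Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "trt_example");
  Ort::Session session(env, ORT_TSTR("model.onnx"), session_options);
  return 0;
}

Letting the runtime create and release the options object, instead of value-initializing the struct directly, avoids having to track defaults for every field as new members are added to the struct.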