@@ -359,14 +359,6 @@ void MapInputsAndDetermineDTypes(
359359 }
360360}
361361
362- uint64_t GetRecommendedWorkspaceSize (const runtime::CudaDevice& device) {
363- if (device.major < 6 ) {
364- return 256 * (1 << 20 );
365- } else {
366- return 1 << 30 ;
367- }
368- }
369-
370362std::string ConvertGraphToTRTEngine (const torch::jit::script::Module& mod, std::string method_name, CompileSpec cfg) {
371363 // Go through Lowering to simplify graph and extract weight parameters
372364 auto graph_and_parameters = lowering::Lower (mod, method_name, cfg.lower_info );
@@ -380,14 +372,14 @@ std::string ConvertGraphToTRTEngine(const torch::jit::script::Module& mod, std::
380372 // Infer the type of an input from the weights of the calculation
381373 auto first_use_types = ir::get_block_first_calc_dtypes_opt (g->block ());
382374
383- // GPU default WS size : 1 GB
384- // Set WS = 256 Mb for Jetson nano/TX1 like platforms whose compute capability is 5.X.
385- auto workspace_size = cfg.convert_info .engine_settings .workspace_size ;
386- auto device_spec = cfg.convert_info .engine_settings .device ;
387- auto cuda_device = runtime::CudaDevice (device_spec.gpu_id , device_spec.device_type );
388- if (workspace_size == 0 ) {
389- cfg.convert_info .engine_settings .workspace_size = GetRecommendedWorkspaceSize (cuda_device);
390- }
375+ // // GPU default WS size : 1 GB
376+ // // Set WS = 256 Mb for Jetson nano/TX1 like platforms whose compute capability is 5.X.
377+ // auto workspace_size = cfg.convert_info.engine_settings.workspace_size;
378+ // auto device_spec = cfg.convert_info.engine_settings.device;
379+ // auto cuda_device = runtime::CudaDevice(device_spec.gpu_id, device_spec.device_type);
380+ // if (workspace_size == 0) {
381+ // cfg.convert_info.engine_settings.workspace_size = GetRecommendedWorkspaceSize(cuda_device);
382+ // }
391383
392384 MapInputsAndDetermineDTypes (cfg, g, static_params, first_use_types);
393385
@@ -399,14 +391,14 @@ std::string ConvertGraphToTRTEngine(const torch::jit::script::Module& mod, std::
399391torch::jit::Module CompileGraph (const torch::jit::Module& mod, CompileSpec cfg) {
400392 torch::jit::Module new_mod (mod._ivalue ()->name () + " _trt" );
401393
402- // GPU default WS size : 1 GB
403- // Set WS = 256 Mb for Jetson nano/TX1 like platforms whose compute capability is 5.X.
404- auto workspace_size = cfg.convert_info .engine_settings .workspace_size ;
394+ // // GPU default WS size : 1 GB
395+ // // Set WS = 256 Mb for Jetson nano/TX1 like platforms whose compute capability is 5.X.
396+ // auto workspace_size = cfg.convert_info.engine_settings.workspace_size;
405397 auto device_spec = cfg.convert_info .engine_settings .device ;
406398 auto cuda_device = runtime::CudaDevice (device_spec.gpu_id , device_spec.device_type );
407- if (workspace_size == 0 ) {
408- cfg.convert_info .engine_settings .workspace_size = GetRecommendedWorkspaceSize (cuda_device);
409- }
399+ // if (workspace_size == 0) {
400+ // cfg.convert_info.engine_settings.workspace_size = GetRecommendedWorkspaceSize(cuda_device);
401+ // }
410402
411403 for (const torch::jit::Method& method : mod.get_methods ()) {
412404 if (method.name ().compare (" forward" ) == 0 ) {