@@ -19,7 +19,11 @@ namespace conversion {
 namespace evaluators {
 namespace {

-nvinfer1::ITensor* index_layer(ConversionCtx* ctx, const torch::jit::Node* n, nvinfer1::ITensor* input_tensor, int64_t index){
+nvinfer1::ITensor* index_layer(
+    ConversionCtx* ctx,
+    const torch::jit::Node* n,
+    nvinfer1::ITensor* input_tensor,
+    int64_t index) {
   // index to access needs to be an at::Tensor
   at::Tensor indices = torch::tensor({index}).to(torch::kI32);
   auto indices_out = torch_tensorrt::core::conversion::converters::tensor_to_const(ctx, indices);
@@ -30,15 +34,15 @@ nvinfer1::ITensor* index_layer(ConversionCtx* ctx, const torch::jit::Node* n, nv
   return indexed_tensor;
 }

-c10::IValue dynamic_size_layer(ConversionCtx* ctx, const torch::jit::Node* n, kwargs& args){
+c10::IValue dynamic_size_layer(ConversionCtx* ctx, const torch::jit::Node* n, kwargs& args) {
   LOG_DEBUG("Using dynamic version of aten::size evaluator");
   auto in = args.at(n->input(0)).ITensorOrFreeze(ctx);
   LOG_DEBUG("Input dimensions: " << in->getDimensions());
   auto shape_layer = ctx->net->addShape(*in);
   TORCHTRT_CHECK(shape_layer, "Unable to create shape layer from node: " << *n);
   auto shape_1d_tensor = shape_layer->getOutput(0);

-  if (n->inputs().size() != 1){
+  if (n->inputs().size() != 1) {
     auto maxDim = static_cast<int64_t>(in->getDimensions().nbDims);
     auto dim = args.at(n->input(1)).unwrapToInt();
     // Handle negative axis by referring to nbDims of input Tensor
@@ -306,7 +310,7 @@ auto aten_registrations TORCHTRT_UNUSED =
             if (n->inputs().size() == 1) {
               if (tensor_var.isITensor()) {
                 auto tensor = tensor_var.ITensor();
-                if (ctx->input_is_dynamic){
+                if (ctx->input_is_dynamic) {
                   return dynamic_size_layer(ctx, n, args);
                 }
                 return util::toVec(tensor->getDimensions());
@@ -322,7 +326,7 @@ auto aten_registrations TORCHTRT_UNUSED =
             } else {
               auto dim = args.at(n->input(1)).unwrapToInt();
               if (tensor_var.isITensor()) {
-                if (ctx->input_is_dynamic){
+                if (ctx->input_is_dynamic) {
                   return dynamic_size_layer(ctx, n, args);
                 }
                 auto tensor = tensor_var.ITensor();
@@ -359,14 +363,14 @@ auto aten_registrations TORCHTRT_UNUSED =
           [](ConversionCtx* ctx, const torch::jit::Node* n, kwargs& args) -> c10::optional<torch::jit::IValue> {
             auto list_input = args.at(n->input(0));
             auto idx = args.at(n->input(1)).unwrapToInt();
-            if (list_input.isIValue()){
-              auto list = args.at(n->input(0)).IValue()->to<c10::List<c10::IValue>>();
-              const int64_t list_size = list.size();
-              const int64_t normalized_idx = normalizeIndex(idx, list_size);
-              TORCHTRT_CHECK(
-                  normalized_idx >= 0 || normalized_idx < list_size, "List index out of range (aten::__getitem__)");
-              return list.get(normalized_idx);
-            } else if (list_input.isITensor()){
+            if (list_input.isIValue()) {
+              auto list = args.at(n->input(0)).IValue()->to<c10::List<c10::IValue>>();
+              const int64_t list_size = list.size();
+              const int64_t normalized_idx = normalizeIndex(idx, list_size);
+              TORCHTRT_CHECK(
+                  normalized_idx >= 0 || normalized_idx < list_size, "List index out of range (aten::__getitem__)");
+              return list.get(normalized_idx);
+            } else if (list_input.isITensor()) {
               auto indexed_tensor = index_layer(ctx, n, list_input.ITensorOrFreeze(ctx), idx);
               auto tensor_holder = TensorContainer();
               tensor_holder.hold_tensor(indexed_tensor);
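
For reference, the gather-based indexing that index_layer and dynamic_size_layer build on top of ConversionCtx can be sketched with plain TensorRT calls. Everything below (the network pointer, the helper name gather_dynamic_dim, and the Int32 shape output) is an illustrative assumption rather than code from this change.

// Minimal sketch, assuming standalone TensorRT usage rather than Torch-TensorRT's ConversionCtx:
// read one dimension of a dynamically shaped tensor as a single-element Int32 tensor, using the
// same IShapeLayer + IGatherLayer pattern as dynamic_size_layer/index_layer above.
#include <NvInfer.h>

static nvinfer1::ITensor* gather_dynamic_dim(
    nvinfer1::INetworkDefinition* network,
    nvinfer1::ITensor* input,
    const int32_t* index) { // 1-element buffer that must outlive the network
  // Runtime shape of `input` as a 1-D tensor (Int32 in the TensorRT versions this code targets).
  auto* shape_layer = network->addShape(*input);
  auto* shape_1d = shape_layer->getOutput(0);

  // 1-element constant holding the index to gather.
  nvinfer1::Dims idx_dims;
  idx_dims.nbDims = 1;
  idx_dims.d[0] = 1;
  nvinfer1::Weights idx_weights{nvinfer1::DataType::kINT32, index, 1};
  auto* idx_const = network->addConstant(idx_dims, idx_weights);

  // Gather that single dimension out of the shape tensor along axis 0.
  auto* gather_layer = network->addGather(*shape_1d, *idx_const->getOutput(0), 0);
  return gather_layer->getOutput(0);
}

In the evaluators themselves the same pattern runs through ConversionCtx, and the gathered ITensor is handed back wrapped in a TensorContainer so downstream nodes can consume it.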