Commit f76a76b

Fix the coverage report issues (#3874)
1 parent 8a61d9b commit f76a76b

7 files changed: 66 additions & 68 deletions

docker/Dockerfile

Lines changed: 5 additions & 7 deletions
@@ -16,8 +16,7 @@ ENV PYTHON_VERSION=${PYTHON_VERSION}
 ENV DEBIAN_FRONTEND=noninteractive
 
 # Install basic dependencies
-RUN apt-get update
-RUN apt install -y build-essential manpages-dev wget zlib1g software-properties-common git libssl-dev zlib1g-dev libbz2-dev libreadline-dev libsqlite3-dev wget ca-certificates curl llvm libncurses5-dev xz-utils tk-dev libxml2-dev libxmlsec1-dev libffi-dev liblzma-dev mecab-ipadic-utf8
+RUN apt-get update && apt-get install -y build-essential manpages-dev wget zlib1g software-properties-common git libssl-dev zlib1g-dev libbz2-dev libreadline-dev libsqlite3-dev wget ca-certificates curl llvm libncurses5-dev xz-utils tk-dev libxml2-dev libxmlsec1-dev libffi-dev liblzma-dev mecab-ipadic-utf8
 
 # Install PyEnv and desired Python version
 ENV HOME="/root"
@@ -34,8 +33,7 @@ RUN pyenv global ${PYTHON_VERSION}
 # Install TensorRT + dependencies
 RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/3bf863cc.pub
 RUN add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/ /"
-RUN apt-get update
-RUN TENSORRT_MAJOR_VERSION=`echo ${TENSORRT_VERSION} | cut -d '.' -f 1` && \
+RUN apt-get update && TENSORRT_MAJOR_VERSION=`echo ${TENSORRT_VERSION} | cut -d '.' -f 1` && \
     apt-get install -y libnvinfer${TENSORRT_MAJOR_VERSION}=${TENSORRT_VERSION}.* \
     libnvinfer-plugin${TENSORRT_MAJOR_VERSION}=${TENSORRT_VERSION}.* \
     libnvinfer-dev=${TENSORRT_VERSION}.* \
@@ -55,9 +53,9 @@ FROM base as torch-tensorrt-builder-base
 ARG ARCH="x86_64"
 ARG TARGETARCH="amd64"
 
-RUN apt-get update
-RUN apt-get install -y python3-setuptools
-RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/3bf863cc.pub
+RUN apt-get update && \
+    apt-get install -y python3-setuptools && \
+    apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/3bf863cc.pub
 
 RUN apt-get update &&\
     apt-get install -y --no-install-recommends locales ninja-build &&\
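
A note on the pattern: splitting apt-get update and apt-get install across separate RUN instructions lets Docker reuse a cached update layer whose package index has gone stale, so a later install can fail on missing package versions. Chaining them in one RUN, as done here and in the TensorRT and builder stages, keeps the index fetch and the install in the same cache unit and also saves image layers.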

py/torch_tensorrt/dynamo/conversion/converter_utils.py

Lines changed: 37 additions & 43 deletions
@@ -446,53 +446,47 @@ def create_constant(
     else:
         shape = list(torch_value.shape)
 
-    if torch_value is not None:
-
-        if torch_value.dtype == torch.uint8:
-            if is_tensorrt_version_supported("10.8.0"):
-                if (
-                    target_quantized_type is None
-                    or target_quantized_type != trt.DataType.FP4
-                ):
-                    # Iconstant layer does not support Uint8, it only support that FP4 data packed in uint8
-                    raise ValueError(
-                        "Currently supported target_quantized_type for uint8 is FP4, got {target_quantized_type=}"
-                    )
-                shape[-1] = shape[-1] * 2
-                weights = to_trt_weights(
-                    ctx,
-                    torch_value,
-                    name,
-                    "CONSTANT",
-                    "CONSTANT",
-                    dtype=trt.DataType.FP4,
-                    count=torch_value.numel() * 2,
-                )
-                constant = ctx.net.add_constant(
-                    shape,
-                    weights,
-                )
-                constant.name = name
-                return constant.get_output(0)
-            else:
+    if torch_value.dtype == torch.uint8:
+        if is_tensorrt_version_supported("10.8.0"):
+            if (
+                target_quantized_type is None
+                or target_quantized_type != trt.DataType.FP4
+            ):
+                # Iconstant layer does not support Uint8, it only support that FP4 data packed in uint8
                 raise ValueError(
-                    "Currently FP4 is only supported in TensorRT 10.8.0 and above"
+                    "Currently supported target_quantized_type for uint8 is FP4, got {target_quantized_type=}"
                 )
-        # Record the weight in ctx for refit and cpu memory reference
+            shape[-1] = shape[-1] * 2
+            weights = to_trt_weights(
+                ctx,
+                torch_value,
+                name,
+                "CONSTANT",
+                "CONSTANT",
+                dtype=trt.DataType.FP4,
+                count=torch_value.numel() * 2,
+            )
+            constant = ctx.net.add_constant(
+                shape,
+                weights,
+            )
+            constant.name = name
+            return constant.get_output(0)
+        else:
+            raise ValueError(
+                "Currently FP4 is only supported in TensorRT 10.8.0 and above"
+            )
+    # Record the weight in ctx for refit and cpu memory reference
 
-        # Convert the torch.Tensor to a trt.Weights object
-        trt_weights = to_trt_weights(ctx, torch_value, name, "CONSTANT", "CONSTANT")
-        constant = ctx.net.add_constant(
-            shape,
-            trt_weights,
-        )
-        constant.name = name
+    # Convert the torch.Tensor to a trt.Weights object
+    trt_weights = to_trt_weights(ctx, torch_value, name, "CONSTANT", "CONSTANT")
+    constant = ctx.net.add_constant(
+        shape,
+        trt_weights,
+    )
+    constant.name = name
 
-        return constant.get_output(0)
-    else:
-        raise ValueError(
-            f"Cannot convert tensor '{name}' to a TensorRT constant because its value is None."
-        )
+    return constant.get_output(0)
 
 
 def get_trt_tensor(
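
The rewrite above mostly dedents: the if torch_value is not None wrapper and its else branch are dropped, presumably because the None path was unreachable in practice and showed up as uncovered in the coverage report. The surviving FP4 branch relies on two-values-per-byte packing; below is a minimal standalone sketch of that arithmetic with hypothetical shapes, not the library's code:

import torch

# Two 4-bit FP4 values are packed into each uint8 byte, so a packed
# tensor with last dimension N represents 2 * N logical elements. This
# is the reason for the shape[-1] * 2 and numel() * 2 adjustments above.
packed = torch.randint(0, 256, (128, 32), dtype=torch.uint8)

logical_shape = list(packed.shape)
logical_shape[-1] = logical_shape[-1] * 2  # [128, 64] logical FP4 values
logical_count = packed.numel() * 2         # 8192 logical FP4 values

print(logical_shape, logical_count)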

py/torch_tensorrt/dynamo/conversion/impl/deconv.py

Lines changed: 15 additions & 12 deletions
@@ -109,13 +109,16 @@ def deconvNd(
     assert len(kernel_shape) > 0, "Deconvolution kernel shape must be non-empty"
 
     # add deconv layer
+    if groups is not None:
+        num_output_maps = num_output_maps * groups
     deconv_layer = ctx.net.add_deconvolution_nd(
         input=input,
-        num_output_maps=num_output_maps * groups,
+        num_output_maps=num_output_maps,
         kernel_shape=kernel_shape,
         kernel=trt.Weights() if isinstance(weight, TRTTensor) else weight,
         bias=trt.Weights() if isinstance(bias, TRTTensor) else bias,
     )
+    assert deconv_layer is not None, "Deconvolution layer is None"
     set_layer_name(deconv_layer, target, name, source_ir)
 
     # If the weight is a TRTTensor, set it as an input of the layer
@@ -145,7 +148,6 @@ def deconvNd(
         if output_padding is not None
         else output_padding
     )
-
     # Set relevant attributes of deconvolution layer
     if padding is not None:
         deconv_layer.padding_nd = padding
@@ -156,19 +158,20 @@ def deconvNd(
     if groups is not None:
         deconv_layer.num_groups = groups
 
-    ndims = len(padding)
-    pre_padding_values = []
-    post_padding_values = []
+    if padding is not None:
+        ndims = len(padding)
+        pre_padding_values = []
+        post_padding_values = []
 
-    for dim in range(ndims):
-        pre_padding = padding[dim]
-        post_padding = padding[dim] - output_padding[dim]
+        for dim in range(ndims):
+            pre_padding = padding[dim]
+            post_padding = padding[dim] - output_padding[dim]
 
-        pre_padding_values.append(pre_padding)
-        post_padding_values.append(post_padding)
+            pre_padding_values.append(pre_padding)
+            post_padding_values.append(post_padding)
 
-    deconv_layer.pre_padding = tuple(pre_padding_values)
-    deconv_layer.post_padding = tuple(post_padding_values)
+        deconv_layer.pre_padding = tuple(pre_padding_values)
+        deconv_layer.post_padding = tuple(post_padding_values)
 
     result = deconv_layer.get_output(0)
 
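Both new guards harden deconvNd against omitted arguments: the groups multiplication now only happens when groups is not None, and the pre/post padding block is skipped when padding is None instead of failing on len(None). A standalone sketch of the padding arithmetic inside the guarded block, with made-up values:

# pre_padding keeps the full padding on the leading side; post_padding
# hands output_padding back on the trailing side, as in the loop above.
padding = (1, 1)
output_padding = (0, 1)

pre_padding_values = [padding[dim] for dim in range(len(padding))]
post_padding_values = [
    padding[dim] - output_padding[dim] for dim in range(len(padding))
]

print(tuple(pre_padding_values), tuple(post_padding_values))  # (1, 1) (1, 0)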
py/torch_tensorrt/dynamo/lowering/_decompositions.py

Lines changed: 1 addition & 1 deletion
@@ -331,7 +331,7 @@ def reduce_operation_with_scatter(
         scatter_tensor = initial_tensor
     else:
         # This case would not be encountered from torch itself
-        print("Invalid Operation for Reduce op!!")
+        raise ValueError(f"Invalid Operation for Reduce op: {self}")
 
     operation_rhs = torch.scatter(scatter_tensor, dim, index_tensor, src_tensor)
     device = to_torch_device(scatter_tensor.device)
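
Raising here instead of printing makes the unsupported-reduction path fail at its source; with the old print, execution apparently fell through to the torch.scatter call with scatter_tensor never assigned, surfacing later as an unrelated NameError. A minimal standalone illustration of the new failure mode (hypothetical helper, not the library's function):

def pick_scatter_tensor(op: str) -> str:
    # Known reductions select a tensor source; anything else now raises
    # immediately instead of printing and falling through.
    if op in ("sum", "prod", "mean", "amax", "amin"):
        return op
    raise ValueError(f"Invalid Operation for Reduce op: {op}")

try:
    pick_scatter_tensor("median")
except ValueError as err:
    print(err)  # Invalid Operation for Reduce op: median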

py/torch_tensorrt/dynamo/utils.py

Lines changed: 2 additions & 2 deletions
@@ -826,13 +826,13 @@ def get_output_metadata(
     return [node.meta for node in nodes]
 
 
-def get_output_dtypes(output: Any, truncate_doulbe: bool = False) -> List[dtype]:
+def get_output_dtypes(output: Any, truncate_double: bool = False) -> List[dtype]:
     output_dtypes = []
     if isinstance(output, torch.fx.node.Node):
         if "val" in output.meta:
             output_meta = output.meta["val"]
             if isinstance(output_meta, (FakeTensor, torch.Tensor)):
-                if truncate_doulbe and output_meta.dtype == torch.float64:
+                if truncate_double and output_meta.dtype == torch.float64:
                     output_dtypes.append(dtype.float32)
                 else:
                     output_dtypes.append(dtype._from(output_meta.dtype))
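
Beyond the spelling, the rename matters for keyword callers: passing truncate_double=True to the old truncate_doulbe signature would raise a TypeError. A short sketch of the truncation rule the flag controls, as a hypothetical standalone helper:

import torch

def truncated_dtype(t: torch.dtype, truncate_double: bool = False) -> torch.dtype:
    # float64 outputs are reported as float32 when truncation is requested;
    # every other dtype passes through unchanged.
    if truncate_double and t == torch.float64:
        return torch.float32
    return t

print(truncated_dtype(torch.float64, truncate_double=True))  # torch.float32
print(truncated_dtype(torch.float16))                        # torch.float16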

py/torch_tensorrt/fx/converters/converter_utils.py

Lines changed: 4 additions & 1 deletion
@@ -173,7 +173,10 @@ def to_numpy(
     """
     output = None
 
-    if value is None or isinstance(value, np.ndarray):
+    if value is None:
+        return None
+
+    elif isinstance(value, np.ndarray):
         output = value
 
     elif isinstance(value, torch.Tensor):
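
Functionally a None input still maps to None, but the early return gives it a branch of its own, so the coverage report can see each input kind exercised separately. A condensed sketch of the resulting control flow (not the full helper):

import numpy as np
import torch

def to_numpy_sketch(value):
    # None gets a dedicated early return; each remaining branch is
    # reachable and individually measurable by coverage.
    if value is None:
        return None
    elif isinstance(value, np.ndarray):
        return value
    elif isinstance(value, torch.Tensor):
        return value.detach().cpu().numpy()
    raise ValueError(f"Unsupported input type: {type(value)}")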

py/torch_tensorrt/fx/tools/timing_cache_utils.py

Lines changed: 2 additions & 2 deletions
@@ -28,12 +28,12 @@ def get_timing_cache_trt(self, timing_cache_file: str) -> bytearray:
         return None
 
     def update_timing_cache(
-        self, timing_cache_file: str, serilized_cache: bytearray
+        self, timing_cache_file: str, serialized_cache: bytearray
     ) -> None:
         if not self.save_timing_cache:
             return
         timing_cache_file = self.get_file_full_name(timing_cache_file)
         with open(timing_cache_file, "wb") as local_cache:
             local_cache.seek(0)
-            local_cache.write(serilized_cache)
+            local_cache.write(serialized_cache)
             local_cache.truncate()
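
The fix is pure spelling, but the surrounding write pattern is worth a note: seek(0) followed by truncate() ensures a shorter serialized cache leaves no stale trailing bytes, though mode "wb" already truncates the file at open, so both calls are belt and braces here. A minimal standalone reproduction with a hypothetical path:

cache_path = "/tmp/example.timing.cache"  # hypothetical location

serialized_cache = bytearray(b"new-cache-bytes")
with open(cache_path, "wb") as local_cache:
    local_cache.seek(0)       # rewind (a no-op right after opening "wb")
    local_cache.write(serialized_cache)
    local_cache.truncate()    # drop any bytes past the new end of data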
