Skip to content

Commit ef3e92d

Browse files
committed
update
1 parent 73a15c6 commit ef3e92d

File tree

4 files changed: +2 additions, -10 deletions (lines changed)

4 files changed

+2
-10
lines changed

.github/workflows/build-test-linux-x86_64.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -426,7 +426,7 @@ jobs:
426426
set -euo pipefail
427427
pushd .
428428
cd tests/py/dynamo
429-
python -m pytest -ra --junitxml=${RUNNER_TEST_RESULTS_DIR}/l2_dynamo_plugins_tests_results.xml automatic_plugin/
429+
python -m pytest -ra -n 0 --junitxml=${RUNNER_TEST_RESULTS_DIR}/l2_dynamo_plugins_tests_results.xml automatic_plugin/
430430
popd
431431
432432
L2-torchscript-tests:

tests/py/dynamo/automatic_plugin/test_automatic_plugin.py

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,3 @@
1-
import os
21
import unittest
32
from typing import Tuple
43

@@ -13,9 +12,6 @@
1312

1413
from ..conversion.harness import DispatchTestCase
1514

16-
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
17-
18-
1915
@triton.jit
2016
def elementwise_mul_kernel(X, Y, Z, BLOCK_SIZE: tl.constexpr):
2117
# Program ID determines the block of data each thread will process

tests/py/dynamo/automatic_plugin/test_automatic_plugin_with_attrs.py

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,3 @@
1-
import os
21
import unittest
32
from typing import Tuple
43

@@ -13,9 +12,6 @@
1312

1413
from ..conversion.harness import DispatchTestCase
1514

16-
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
17-
18-
1915
@triton.jit
2016
def elementwise_scale_mul_kernel(X, Y, Z, a, b, BLOCK_SIZE: tl.constexpr):
2117
pid = tl.program_id(0)

tests/py/dynamo/automatic_plugin/test_flashinfer_rmsnorm.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ def _(input: torch.Tensor, weight: torch.Tensor, b: float = 1e-6) -> torch.Tenso
3434
)
3535

3636

37-
# @unittest.skip("Not Available")
37+
@unittest.skip("Not Available")
3838
@unittest.skipIf(
3939
not importlib.util.find_spec("flashinfer")
4040
or torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx,

0 commit comments

Comments (0)