54 changes: 53 additions & 1 deletion test/xpu/run_test_with_only.py
@@ -42,7 +42,59 @@ def launch_test(test_case, skip_list=None, exe_list=None):
"test_comprehensive_nn_functional_nll_loss_xpu_float64",
"bincount",
)
res += launch_test("test_decomp_xpu.py", exe_list=execute_list)
skip_list = (
"test_comprehensive_baddbmm_xpu_float64",
"test_comprehensive_logspace_tensor_overload_xpu_int16",
"test_comprehensive_logspace_tensor_overload_xpu_int32",
"test_comprehensive_logspace_tensor_overload_xpu_int64",
"test_comprehensive_logspace_xpu_int16",
"test_comprehensive_logspace_xpu_int32",
"test_comprehensive_logspace_xpu_int64",
"test_comprehensive_nn_functional_conv_transpose2d_xpu_bfloat16",
"test_comprehensive_nn_functional_conv_transpose2d_xpu_complex128",
"test_comprehensive_nn_functional_conv_transpose2d_xpu_complex32",
"test_comprehensive_nn_functional_conv_transpose2d_xpu_complex64",
"test_comprehensive_nn_functional_conv_transpose2d_xpu_float16",
"test_comprehensive_nn_functional_conv_transpose2d_xpu_float32",
"test_comprehensive_nn_functional_conv_transpose2d_xpu_float64",
"test_comprehensive_nn_functional_conv_transpose3d_xpu_bfloat16",
"test_comprehensive_nn_functional_conv_transpose3d_xpu_complex128",
"test_comprehensive_nn_functional_conv_transpose3d_xpu_complex32",
"test_comprehensive_nn_functional_conv_transpose3d_xpu_complex64",
"test_comprehensive_nn_functional_conv_transpose3d_xpu_float16",
"test_comprehensive_nn_functional_conv_transpose3d_xpu_float32",
"test_comprehensive_nn_functional_conv_transpose3d_xpu_float64",
"test_comprehensive_nn_functional_instance_norm_xpu_float64",
"test_comprehensive_nn_functional_nll_loss_xpu_float16",
"test_comprehensive_nn_functional_pad_reflect_xpu_bfloat16",
"test_comprehensive_torch_ops_aten__flash_attention_forward_xpu_float16",
"test_comprehensive_vdot_xpu_complex128",
"test_comprehensive_vdot_xpu_complex64",
"test_quick_addmm_xpu_float64",
"test_quick_baddbmm_xpu_float64",
"test_quick_core_backward_baddbmm_xpu_float64",
"test_quick_core_backward_mv_xpu_float64",
"test_quick_logspace_tensor_overload_xpu_int16",
"test_quick_logspace_tensor_overload_xpu_int32",
"test_quick_logspace_tensor_overload_xpu_int64",
"test_quick_logspace_xpu_int16",
"test_quick_logspace_xpu_int32",
"test_quick_logspace_xpu_int64",
"test_quick_vdot_xpu_complex128",
"test_quick_vdot_xpu_complex64",
"test_exponential_non_inf_xpu",
"test_aten_core_operators",
"test_has_decomposition",
"test_comprehensive_diff_xpu_complex128",
"test_comprehensive_ormqr_xpu_complex128",
"test_quick_var_mean_xpu_float64",
"test_comprehensive_diff_xpu_complex64",
"test_comprehensive_ormqr_xpu_complex64",
"test_quick_mean_xpu_complex128",
"test_comprehensive_grid_sampler_2d_xpu_bfloat16",
)
# res += launch_test("test_decomp_xpu.py", exe_list=execute_list)
res += launch_test("test_decomp.py", skip_list=skip_list)

if os.name == "nt":
sys.exit(res)
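
Note: launch_test itself is defined above this hunk and is not shown in the diff. Purely as an illustration of what a helper with that signature might do with a skip list or execute list, here is a hypothetical sketch based on pytest's -k filtering; it is not the repository's actual implementation.

```python
import subprocess

def launch_test_sketch(test_case, skip_list=None, exe_list=None):
    # Hypothetical stand-in for launch_test: build a pytest command line.
    cmd = ["pytest", "-v", test_case]
    if skip_list:
        # Deselect every listed case via a single -k expression.
        cmd += ["-k", " and ".join(f"not {name}" for name in skip_list)]
    elif exe_list:
        # Run only the listed cases.
        cmd += ["-k", " or ".join(exe_list)]
    return subprocess.call(cmd)
```

Under this sketch, the new `launch_test("test_decomp.py", skip_list=skip_list)` call would deselect every entry of the tuple above, whereas the old `test_decomp_xpu.py` call whitelisted cases through `exe_list`.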
91 changes: 87 additions & 4 deletions test/xpu/run_test_with_skip.py
@@ -13,29 +13,112 @@
default="selected",
help="Test cases scope",
)
# Add a --skip-cases flag that opts into the Windows skip dictionary
parser.add_argument(
"--skip-cases",
action="store_true",
default=False,
help="Use window skip dictionary for test cases",
)
args = parser.parse_args()


def should_skip_entire_file(skip_list):
"""Check if the skip list contains any entire file skip pattern (*.py::)"""
if not skip_list:
return False
return any(item.endswith(".py::") for item in skip_list)


# Import the Windows skip dictionary when --skip-cases is passed
if args.skip_cases:
try:
# Import the window skip dictionary module
from window_skip_dict import skip_dict as window_skip_dict

# Merge the Windows skip dictionary with the default one using an intelligent strategy
merged_skip_dict = {}

# First, copy all keys from default skip_dict
for key in skip_dict:
merged_skip_dict[key] = skip_dict[key].copy() if skip_dict[key] else []

# Then merge with window_skip_dict using intelligent strategy
for key in window_skip_dict:
window_skip_list = window_skip_dict[key]

if key in merged_skip_dict:
default_skip_list = merged_skip_dict[key]

# Intelligent merge strategy:
if should_skip_entire_file(window_skip_list):
# If Windows wants to skip entire file, use ONLY Windows skip list
merged_skip_dict[key] = window_skip_list
print(
f"Windows entire file skip detected for {key}, using: {window_skip_list}"
)
else:
# Otherwise, merge both lists and remove duplicates
combined_list = default_skip_list + [
item
for item in window_skip_list
if item not in default_skip_list
]
merged_skip_dict[key] = combined_list
print(f"Windows merging skip lists for {key}: {combined_list}")
else:
# Add new key-value pair from window_skip_dict
merged_skip_dict[key] = window_skip_list
print(f"Windows adding new skip key: {key} with {window_skip_list}")

print("Using intelligently merged skip dictionary")

except ImportError:
print(
"Warning: window_skip_dict module not found, using default skip dictionary"
)
merged_skip_dict = skip_dict
except Exception as e:
print(f"Error importing window skip dictionary: {e}")
merged_skip_dict = skip_dict
else:
merged_skip_dict = skip_dict
print("Using default skip dictionary")

res = 0
fail_test = []

for key in skip_dict:
skip_list = skip_dict[key]
for key in merged_skip_dict:
skip_list = merged_skip_dict[key]
exe_list = None

if args.test_cases == "skipped":
# When running only skipped cases, use skip_list as exe_list
exe_list = skip_list
skip_list = None
if exe_list is None:
if not exe_list: # Check if exe_list is empty
print(f"Skipping {key} as no tests to execute")
continue
elif args.test_cases == "all":
# When running all cases, don't skip any
skip_list = None
# For "selected" case, use the skip_list as is

print(f"Running test case: {key}")
if skip_list:
print(f"Skip list: {skip_list}")
if exe_list:
print(f"Execute list: {exe_list}")

fail = launch_test(key, skip_list=skip_list, exe_list=exe_list)
res += fail
if fail:
fail_test.append(key)

if fail_test:
print(",".join(fail_test) + " have failures")

else:
print("All tests passed!")

if os.name == "nt":
sys.exit(res)
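
To make the merge behaviour above concrete, here is a minimal, self-contained sketch with made-up dictionary contents (the real entries live in skip_list_common.py and window_skip_dict.py): a Windows entry ending in `.py::` replaces the default list for that file, other entries are union-merged with duplicates dropped, and keys that only exist in the Windows dictionary are added as-is.

```python
# Illustrative inputs only; not real skip entries.
default_skip = {
    "test_ops_xpu.py": ["test_a", "test_b"],
    "test_decomp.py": ["test_c"],
}
windows_skip = {
    "test_ops_xpu.py": ["test_ops_xpu.py::"],  # whole-file skip on Windows
    "test_decomp.py": ["test_b", "test_c"],    # extra per-case skips
}

merged = {key: list(value) for key, value in default_skip.items()}
for key, win_list in windows_skip.items():
    if key in merged and not any(item.endswith(".py::") for item in win_list):
        # Union-merge, keeping order and dropping duplicates.
        merged[key] += [item for item in win_list if item not in merged[key]]
    else:
        # Whole-file skip (or a key new to Windows): the Windows list wins.
        merged[key] = win_list

assert merged == {
    "test_ops_xpu.py": ["test_ops_xpu.py::"],
    "test_decomp.py": ["test_c", "test_b"],
}
```

On a Windows runner this path is opted into with `python run_test_with_skip.py --skip-cases`; without the flag the default dictionary is used unchanged.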
64 changes: 64 additions & 0 deletions test/xpu/skip_list_common.py
@@ -737,6 +737,70 @@
# CUDA specific case
"test_cufft_plan_cache_xpu_float64",
),
"test_decomp.py": (
# AssertionError: Tensor-likes are not close! ; Exception: Tensor-likes are not close!
"test_comprehensive_baddbmm_xpu_float64",
"test_comprehensive_logspace_tensor_overload_xpu_int16",
"test_comprehensive_logspace_tensor_overload_xpu_int32",
"test_comprehensive_logspace_tensor_overload_xpu_int64",
"test_comprehensive_logspace_xpu_int16",
"test_comprehensive_logspace_xpu_int32",
"test_comprehensive_logspace_xpu_int64",
# RuntimeError: could not create a primitive descriptor for the deconvolution forward propagation primitive.
"test_comprehensive_nn_functional_conv_transpose2d_xpu_bfloat16",
"test_comprehensive_nn_functional_conv_transpose2d_xpu_complex128",
"test_comprehensive_nn_functional_conv_transpose2d_xpu_complex32",
"test_comprehensive_nn_functional_conv_transpose2d_xpu_complex64",
"test_comprehensive_nn_functional_conv_transpose2d_xpu_float16",
"test_comprehensive_nn_functional_conv_transpose2d_xpu_float32",
"test_comprehensive_nn_functional_conv_transpose2d_xpu_float64",
"test_comprehensive_nn_functional_conv_transpose3d_xpu_bfloat16",
"test_comprehensive_nn_functional_conv_transpose3d_xpu_complex128",
"test_comprehensive_nn_functional_conv_transpose3d_xpu_complex32",
"test_comprehensive_nn_functional_conv_transpose3d_xpu_complex64",
"test_comprehensive_nn_functional_conv_transpose3d_xpu_float16",
"test_comprehensive_nn_functional_conv_transpose3d_xpu_float32",
"test_comprehensive_nn_functional_conv_transpose3d_xpu_float64",
# AssertionError: Tensor-likes are not close! ; Exception: Tensor-likes are not close!
"test_comprehensive_nn_functional_instance_norm_xpu_float64",
# RuntimeError: Difference from float64 is larger with decomposition nll_loss_forward.default than original on output 0.
"test_comprehensive_nn_functional_nll_loss_xpu_float16",
"test_comprehensive_nn_functional_pad_reflect_xpu_bfloat16",
# NotImplementedError: Could not run 'aten::_flash_attention_forward' with arguments from the 'CPU' backend.
"test_comprehensive_torch_ops_aten__flash_attention_forward_xpu_float16",
# AssertionError: Scalars are not close! ; Exception: Scalars are not close!
"test_comprehensive_vdot_xpu_complex128",
"test_comprehensive_vdot_xpu_complex64",
# AssertionError: Tensor-likes are not close! ; Exception: Tensor-likes are not close!
"test_quick_addmm_xpu_float64",
"test_quick_baddbmm_xpu_float64",
"test_quick_core_backward_baddbmm_xpu_float64",
# Exception: Jacobian mismatch for output 0 with respect to input 0
"test_quick_core_backward_mv_xpu_float64",
# AssertionError: Tensor-likes are not equal! ; Exception: Tensor-likes are not equal!
"test_quick_logspace_tensor_overload_xpu_int16",
"test_quick_logspace_tensor_overload_xpu_int32",
"test_quick_logspace_tensor_overload_xpu_int64",
"test_quick_logspace_xpu_int16",
"test_quick_logspace_xpu_int32",
"test_quick_logspace_xpu_int64",
# AssertionError: Scalars are not close! ; Exception: Scalars are not close!
"test_quick_vdot_xpu_complex128",
"test_quick_vdot_xpu_complex64",
# AssertionError: Tensor-likes are not close!
"test_exponential_non_inf_xpu",
# RuntimeError: I got this output for HasDecompTest.test_aten_core_operators:
"test_aten_core_operators",
"test_has_decomposition",
# AssertionError: Tensor-likes are not close!
"test_comprehensive_diff_xpu_complex128",
"test_comprehensive_ormqr_xpu_complex128",
"test_quick_var_mean_xpu_float64",
"test_comprehensive_diff_xpu_complex64",
"test_comprehensive_ormqr_xpu_complex64",
"test_quick_mean_xpu_complex128",
"test_comprehensive_grid_sampler_2d_xpu_bfloat16",
),
"functorch/test_ops_xpu.py": None,
"test_sparse_xpu.py": None,
"test_sparse_csr_xpu.py": None,
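
The window_skip_dict module imported by run_test_with_skip.py is not part of this diff. Assuming it mirrors the skip_dict shape shown in skip_list_common.py (a test file name mapped to a tuple of test-name substrings, or None), a placeholder illustration of its layout could look like the sketch below; the trailing `.py::` entry is the convention that should_skip_entire_file treats as a whole-file skip. The entries are invented for illustration, not actual Windows skips.

```python
# window_skip_dict.py -- placeholder entries for illustration only
skip_dict = {
    # Skip individual cases on Windows; merged with the common skip list.
    "test_decomp.py": (
        "test_example_case_xpu_float64",  # placeholder name, not a real test
    ),
    # Skip an entire file on Windows; this replaces the common list for that file.
    "test_sparse_xpu.py": ("test_sparse_xpu.py::",),
}
```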