7 | 7 | import logging |
8 | 8 | from src.wrappers.OsipiBase import OsipiBase |
9 | 9 | from utilities.data_simulation.GenerateData import GenerateData |
10 | | - |
11 | | -logging.basicConfig(level=logging.INFO) |
12 | 10 | logger = logging.getLogger(__name__) |
| 11 | +# run using python -m pytest from the root folder |
13 | 12 |
14 | | -# Define a global list to hold the test results |
15 | 13 | test_results = [] |
16 | 14 |
| 15 | + |
17 | 16 | def signal_helper(signal): |
18 | 17 | signal = np.asarray(signal) |
19 | 18 | signal = np.abs(signal) |
@@ -60,57 +59,38 @@ def data_ivim_fit_saved(): |
60 | 59 | tolerances = algorithm_dict.get("tolerances", {}) |
61 | 60 | yield name, bvals, data, algorithm, xfail, kwargs, tolerances |
62 | 61 |
| 62 | + |
63 | 63 | @pytest.mark.parametrize("name, bvals, data, algorithm, xfail, kwargs, tolerances", data_ivim_fit_saved()) |
64 | 64 | def test_ivim_fit_saved(name, bvals, data, algorithm, xfail, kwargs, tolerances, request): |
65 | 65 | global test_results |
66 | 66 | if xfail["xfail"]: |
67 | | - logger.info(xfail["xfail"]) |
68 | 67 | mark = pytest.mark.xfail(reason="xfail", strict=xfail["strict"]) |
69 | 68 | request.node.add_marker(mark) |
70 | 69 | fit = OsipiBase(algorithm=algorithm, **kwargs) |
71 | 70 | signal, ratio = signal_helper(data["data"]) |
| 71 | + |
72 | 72 | tolerances = tolerances_helper(tolerances, ratio, data["noise"]) |
73 | 73 | [f_fit, Dp_fit, D_fit] = fit.osipi_fit(signal, bvals) |
74 | | - |
75 | | - # Log the results and tolerances |
76 | | - logger.info(tolerances["rtol"]) |
77 | | - logger.info(tolerances["atol"]) |
78 | | - |
79 | 74 | def to_list_if_needed(value): |
80 | 75 | return value.tolist() if isinstance(value, np.ndarray) else value |
81 | | - |
82 | 76 | test_result = { |
83 | 77 | "name": name, |
84 | 78 | "algorithm": algorithm, |
85 | 79 | "f_fit": to_list_if_needed(f_fit), |
86 | 80 | "Dp_fit": to_list_if_needed(Dp_fit), |
87 | 81 | "D_fit": to_list_if_needed(D_fit), |
88 | | - |
| 82 | + "f": to_list_if_needed(data['f']), |
| 83 | + "Dp": to_list_if_needed(data['Dp']), |
| 84 | + "D": to_list_if_needed(data['D']), |
89 | 85 | "status": "PASSED" |
90 | 86 | } |
| 87 | + if xfail["xfail"]: |
| 88 | + test_result['status'] = "XFAILED" |
91 | 89 |
92 | | - try: |
93 | | - npt.assert_allclose(data['f'], f_fit, rtol=tolerances["rtol"]["f"], atol=tolerances["atol"]["f"]) |
94 | | - npt.assert_allclose(data['D'], D_fit, rtol=tolerances["rtol"]["D"], atol=tolerances["atol"]["D"]) |
95 | | - npt.assert_allclose(data['Dp'], Dp_fit, rtol=tolerances["rtol"]["Dp"], atol=tolerances["atol"]["Dp"]) |
96 | | - except AssertionError as e: |
97 | | - test_result["status"] = "FAILED" |
98 | | - test_result["error"] = str(e) |
99 | | - test_result["actual_values"] = { |
100 | | - "f": to_list_if_needed(f_fit), |
101 | | - "D": to_list_if_needed(D_fit), |
102 | | - "Dp": to_list_if_needed(Dp_fit) |
103 | | - } |
104 | | - test_result["desired_values"] = { |
105 | | - "f": data['f'], |
106 | | - "D": data['D'], |
107 | | - "Dp": data['Dp'] |
108 | | - } |
109 | | - logger.error(f"Test failed for {name} with algorithm {algorithm}: {e}") |
110 | | - |
111 | | - # Append the result to the test_results list |
112 | 90 | test_results.append(test_result) |
113 | 91 | with open('plots_data.json', 'w') as f: |
114 | 92 | json.dump({"results": test_results, "rtol": tolerances["rtol"], |
115 | 93 | "atol": tolerances["atol"], }, f, indent=4) |
116 | | - logger.info(test_results) |
| 94 | + npt.assert_allclose(data['f'], f_fit, rtol=tolerances["rtol"]["f"], atol=tolerances["atol"]["f"]) |
| 95 | + npt.assert_allclose(data['D'], D_fit, rtol=tolerances["rtol"]["D"], atol=tolerances["atol"]["D"]) |
| 96 | + npt.assert_allclose(data['Dp'], Dp_fit, rtol=tolerances["rtol"]["Dp"], atol=tolerances["atol"]["Dp"]) |
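
For readers skimming the diff: the key behavioural change is that the fitted and ground-truth values are recorded and written to `plots_data.json` *before* the `assert_allclose` calls, so the plotting data survives failing cases, and expected failures are marked dynamically via `request.node.add_marker`. The snippet below is a minimal sketch of that record-then-assert pattern; the test name, parameter values, and tolerances are hypothetical placeholders, not code from this repository.

```python
# Minimal sketch of the record-then-assert pattern used in the diff above.
# All names and values here are placeholders, not part of the repository.
import json

import numpy.testing as npt
import pytest

results = []  # module-level store, analogous to test_results above


@pytest.mark.parametrize("expected, fitted, xfail", [(0.10, 0.11, False)])
def test_record_then_assert(expected, fitted, xfail, request):
    if xfail:
        # mark this parametrized case as an expected failure at run time
        request.node.add_marker(pytest.mark.xfail(reason="known issue", strict=False))

    # record the outcome first, so the JSON file reflects every case
    results.append({"expected": expected, "fitted": fitted,
                    "status": "XFAILED" if xfail else "PASSED"})
    with open("plots_data.json", "w") as f:
        json.dump({"results": results}, f, indent=4)

    # assert last; a failure here still leaves the recorded result on disk
    npt.assert_allclose(fitted, expected, rtol=0.2, atol=0.0)
```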