
Commit a768dcb

Save test results to a json file.
1 parent b099579 commit a768dcb

File tree

1 file changed: +49, -107 lines changed


tests/IVIMmodels/unit_tests/test_ivim_fit.py

Lines changed: 49 additions & 107 deletions
@@ -4,113 +4,15 @@
 import json
 import pathlib
 import os
-
+import logging
 from src.wrappers.OsipiBase import OsipiBase
 from utilities.data_simulation.GenerateData import GenerateData

-#run using python -m pytest from the root folder
-
-
-# @pytest.fixture
-# def algorithm_fixture()
-# def test_fixtures()
-
-# use a fixture to generate data
-# either read a config file for the test or perhaps hard code a few fixtures and usefixtures in the config?
-# use a fixture to save data
-
-# def algorithm_list():
-#     # Find the algorithms from algorithms.json
-#     file = pathlib.Path(__file__)
-#     algorithm_path = file.with_name('algorithms.json')
-#     with algorithm_path.open() as f:
-#         algorithm_information = json.load(f)
-#     return algorithm_information["algorithms"]
-
-# @pytest.fixture(params=algorithm_list())
-# def algorithm_fixture(request):
-#     # assert request.param == "algorithms"
-#     yield request.param
-
-
-
-# @pytest.fixture(params=SNR)
-# def noise_fixture(request):
-#     return request.config.getoption("--noise")
-
-# @pytest.fixture
-# def noise_fixture(request):
-#     yield request.param
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)

-# @pytest.mark.parametrize("S", [SNR])
-# @pytest.mark.parametrize("D, Dp, f, bvals", [[0.0015, 0.1, 0.11000000000000007,[0, 5, 10, 50, 100, 200, 300, 500, 1000]]])
-# def test_generated(ivim_algorithm, ivim_data, SNR):
-#     S0 = 1
-#     gd = GenerateData()
-#     name, bvals, data = ivim_data
-#     D = data["D"]
-#     f = data["f"]
-#     Dp = data["Dp"]
-#     if "data" not in data:
-#         signal = gd.ivim_signal(D, Dp, f, S0, bvals, SNR)
-#     else:
-#         signal = data["data"]
-#     fit = OsipiBase(algorithm=ivim_algorithm)
-#     [f_fit, Dp_fit, D_fit] = fit.osipi_fit(signal, bvals)
-#     npt.assert_allclose([f, D, Dp], [f_fit, D_fit, Dp_fit])
-
-
-
-# test_linear_data = [
-#     pytest.param(0, np.linspace(0, 1000, 11), id='0'),
-#     pytest.param(0.01, np.linspace(0, 1000, 11), id='0.1'),
-#     pytest.param(0.02, np.linspace(0, 1000, 11), id='0.2'),
-#     pytest.param(0.03, np.linspace(0, 1000, 11), id='0.3'),
-#     pytest.param(0.04, np.linspace(0, 1000, 11), id='0.4'),
-#     pytest.param(0.05, np.linspace(0, 1000, 11), id='0.5'),
-#     pytest.param(0.08, np.linspace(0, 1000, 11), id='0.8'),
-#     pytest.param(0.1, np.linspace(0, 1000, 11), id='1'),
-# ]
-
-#@pytest.mark.parametrize("D, bvals", test_linear_data)
-#def test_linear_fit(D, bvals):
-    #gd = GenerateData()
-    #gd_signal = gd.exponential_signal(D, bvals)
-    #print(gd_signal)
-    #fit = LinearFit()
-    #D_fit = fit.linear_fit(bvals, np.log(gd_signal))
-    #npt.assert_allclose([1, D], D_fit)
-
-# test_ivim_data = [
-#     pytest.param(0, 0.01, 0.05, np.linspace(0, 1000, 11), id='0'),
-#     pytest.param(0.1, 0.01, 0.05, np.linspace(0, 1000, 11), id='0.1'),
-#     pytest.param(0.2, 0.01, 0.05, np.linspace(0, 1000, 11), id='0.2'),
-#     pytest.param(0.1, 0.05, 0.1, np.linspace(0, 1000, 11), id='0.3'),
-#     pytest.param(0.4, 0.001, 0.05, np.linspace(0, 1000, 11), id='0.4'),
-#     pytest.param(0.5, 0.001, 0.05, np.linspace(0, 1000, 11), id='0.5'),
-# ]
-
-#@pytest.mark.parametrize("f, D, Dp, bvals", test_ivim_data)
-#def test_ivim_fit(f, D, Dp, bvals):
-    ## We should make a wrapper that runs this for a range of different settings, such as b thresholds, bounds, etc.
-    ## An additional inputs to these functions could perhaps be a "settings" class with attributes that are the settings to the
-    ## algorithms. I.e. bvalues, thresholds, bounds, initial guesses.
-    ## That way, we can write something that defines a range of settings, and then just run them through here.
-
-    #gd = GenerateData()
-    #gd_signal = gd.ivim_signal(D, Dp, f, 1, bvals)
-
-    ##fit = LinearFit() # This is the old code by ETP
-    #fit = ETP_SRI_LinearFitting() # This is the standardized format by IAR, which every algorithm will be implemented with
-
-    #[f_fit, Dp_fit, D_fit] = fit.ivim_fit(gd_signal, bvals) # Note that I have transposed Dp and D. We should decide on a standard order for these. I usually go with f, Dp, and D ordered after size.
-    #npt.assert_allclose([f, D], [f_fit, D_fit], atol=1e-5)
-    #if not np.allclose(f, 0):
-        #npt.assert_allclose(Dp, Dp_fit, rtol=1e-2, atol=1e-3)
-
-
-# convert the algorithm list and signal list to fixtures that read from the files into params (scope="session")
-# from that helpers can again parse the files?
+# Define a global list to hold the test results
+test_results = []

 def signal_helper(signal):
     signal = np.asarray(signal)
@@ -158,17 +60,57 @@ def data_ivim_fit_saved():
             tolerances = algorithm_dict.get("tolerances", {})
             yield name, bvals, data, algorithm, xfail, kwargs, tolerances

-
 @pytest.mark.parametrize("name, bvals, data, algorithm, xfail, kwargs, tolerances", data_ivim_fit_saved())
 def test_ivim_fit_saved(name, bvals, data, algorithm, xfail, kwargs, tolerances, request):
+    global test_results
     if xfail["xfail"]:
+        logger.info(xfail["xfail"])
         mark = pytest.mark.xfail(reason="xfail", strict=xfail["strict"])
         request.node.add_marker(mark)
     fit = OsipiBase(algorithm=algorithm, **kwargs)
     signal, ratio = signal_helper(data["data"])
     tolerances = tolerances_helper(tolerances, ratio, data["noise"])
     [f_fit, Dp_fit, D_fit] = fit.osipi_fit(signal, bvals)
-    npt.assert_allclose(data['f'], f_fit, rtol=tolerances["rtol"]["f"], atol=tolerances["atol"]["f"])
-    npt.assert_allclose(data['D'], D_fit, rtol=tolerances["rtol"]["D"], atol=tolerances["atol"]["D"])
-    npt.assert_allclose(data['Dp'], Dp_fit, rtol=tolerances["rtol"]["Dp"], atol=tolerances["atol"]["Dp"])

+    # Log the results and tolerances
+    logger.info(tolerances["rtol"])
+    logger.info(tolerances["atol"])
+
+    def to_list_if_needed(value):
+        return value.tolist() if isinstance(value, np.ndarray) else value
+
+    test_result = {
+        "name": name,
+        "algorithm": algorithm,
+        "f_fit": to_list_if_needed(f_fit),
+        "Dp_fit": to_list_if_needed(Dp_fit),
+        "D_fit": to_list_if_needed(D_fit),
+
+        "status": "PASSED"
+    }
+
+    try:
+        npt.assert_allclose(data['f'], f_fit, rtol=tolerances["rtol"]["f"], atol=tolerances["atol"]["f"])
+        npt.assert_allclose(data['D'], D_fit, rtol=tolerances["rtol"]["D"], atol=tolerances["atol"]["D"])
+        npt.assert_allclose(data['Dp'], Dp_fit, rtol=tolerances["rtol"]["Dp"], atol=tolerances["atol"]["Dp"])
+    except AssertionError as e:
+        test_result["status"] = "FAILED"
+        test_result["error"] = str(e)
+        test_result["actual_values"] = {
+            "f": to_list_if_needed(f_fit),
+            "D": to_list_if_needed(D_fit),
+            "Dp": to_list_if_needed(Dp_fit)
+        }
+        test_result["desired_values"] = {
+            "f": data['f'],
+            "D": data['D'],
+            "Dp": data['Dp']
+        }
+        logger.error(f"Test failed for {name} with algorithm {algorithm}: {e}")
+
+    # Append the result to the test_results list
+    test_results.append(test_result)
+    with open('plots_data.json', 'w') as f:
+        json.dump({"results": test_results, "rtol": tolerances["rtol"],
+                   "atol": tolerances["atol"], }, f, indent=4)
+    logger.info(test_results)
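
For reference, a minimal sketch (not part of this commit) of how the plots_data.json written by test_ivim_fit_saved could be read back and summarized after a pytest run. It assumes the file sits in the working directory from which pytest was invoked; the key names mirror the dictionary written in the diff above.

import json

# Sketch only: load the report written by test_ivim_fit_saved.
with open('plots_data.json') as f:
    report = json.load(f)

# "results" holds one entry per test case; "rtol"/"atol" are the tolerances
# recorded by the last test that wrote the file.
for result in report["results"]:
    line = f"{result['name']} / {result['algorithm']}: {result['status']}"
    if result["status"] == "FAILED":
        # Failed entries also carry "error", "actual_values" and "desired_values".
        line += f" - f: got {result['actual_values']['f']}, wanted {result['desired_values']['f']}"
    print(line)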

0 commit comments
