
Commit 1c444aa

chore: replace black with ruff and format all files
1 parent 35808e4 commit 1c444aa

64 files changed: 266 additions & 444 deletions


examples/big_mono_tests.py

Lines changed: 2 additions & 7 deletions
@@ -22,9 +22,7 @@
 
 counter = Clicker()
 
-def _generate_test(
-model: type[AModelWithGenerator], o_borders: list[tuple[float, float]]
-) -> list[Test]:
+def _generate_test(model: type[AModelWithGenerator], o_borders: list[tuple[float, float]]) -> list[Test]:
 return generate_mono_test(
 model_t=model,
 params_borders=o_borders,
@@ -36,10 +34,7 @@ def _generate_test(
 tests_per_size=8,
 tests_per_cond=2,
 runs_per_test=1,
-solvers=[
-init_solver(16, 0.1, 0.001, 3, optimizer)
-for optimizer in TESTS_OPTIMIZERS
-],
+solvers=[init_solver(16, 0.1, 0.001, 3, optimizer) for optimizer in TESTS_OPTIMIZERS],
 )
 
 tests += _generate_test(WeibullModelExp, [(0.25, 25), (0.25, 25)])

examples/config.py

Lines changed: 1 addition & 5 deletions
@@ -7,11 +7,7 @@
 
 CPU_COUNT = os.cpu_count()
 MAX_WORKERS_PERCENT = 0.75
-MAX_WORKERS = (
-min(round(CPU_COUNT * MAX_WORKERS_PERCENT), CPU_COUNT)
-if CPU_COUNT is not None
-else 1
-)
+MAX_WORKERS = min(round(CPU_COUNT * MAX_WORKERS_PERCENT), CPU_COUNT) if CPU_COUNT is not None else 1
 # MAX_WORKERS = 4
 
 RESULTS_FOLDER = EXAMPLES / "results"
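The worker-count arithmetic in the collapsed ternary above is unchanged; only the layout differs. A minimal sketch of how the expression evaluates (the 8-CPU figure below is illustrative, not from the repository):

import os

CPU_COUNT = os.cpu_count()  # e.g. 8 on an 8-core machine
MAX_WORKERS_PERCENT = 0.75
# round(8 * 0.75) = 6 and min(6, 8) = 6 workers; if os.cpu_count() returns None, fall back to 1.
MAX_WORKERS = min(round(CPU_COUNT * MAX_WORKERS_PERCENT), CPU_COUNT) if CPU_COUNT is not None else 1
print(MAX_WORKERS)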

examples/diff_test.py

Lines changed: 6 additions & 30 deletions
@@ -28,11 +28,7 @@
 for sp in gaussian_start_params:
 main_distr = list(gaussian.generate(np.array(sp), BASE_SIZE // 2, normalized=False))
 for second_sp in np.linspace(sp[0] - 5, sp[0] + 5, num=8, endpoint=True):
-x = main_distr + list(
-gaussian.generate(
-np.array((second_sp, 3.0)), BASE_SIZE // 2, normalized=False
-)
-)
+x = main_distr + list(gaussian.generate(np.array((second_sp, 3.0)), BASE_SIZE // 2, normalized=False))
 random.shuffle(x)
 
 start_params_borders = [
@@ -45,12 +41,7 @@
 samples = random.sample(x, size)
 for _ in range(TESTS_PER_SIZE):
 start_params = [
-np.array(
-[
-random.uniform(border[0], border[1])
-for border in start_params_borders
-]
-)
+np.array([random.uniform(border[0], border[1]) for border in start_params_borders])
 for _ in range(2)
 ]
 tests.append(
@@ -81,22 +72,15 @@
 ]
 ),
 ),
-[
-init_solver(16, 0.1, 0.001, 3, optimizer)
-for optimizer in TESTS_OPTIMIZERS
-],
+[init_solver(16, 0.1, 0.001, 3, optimizer) for optimizer in TESTS_OPTIMIZERS],
 1,
 )
 )
 
 for sp in weibull_start_params:
 main_distr = list(weibull.generate(np.array(sp), BASE_SIZE // 2, normalized=False))
 for second_sp in np.linspace(max(sp[0] - 5, 0.1), sp[0] + 5, num=8, endpoint=True):
-x = main_distr + list(
-weibull.generate(
-np.array((second_sp, 1.0)), BASE_SIZE // 2, normalized=False
-)
-)
+x = main_distr + list(weibull.generate(np.array((second_sp, 1.0)), BASE_SIZE // 2, normalized=False))
 random.shuffle(x)
 
 start_params_borders = [
@@ -109,12 +93,7 @@
 samples = random.sample(x, size)
 for _ in range(TESTS_PER_SIZE):
 start_params = [
-np.array(
-[
-random.uniform(border[0], border[1])
-for border in start_params_borders
-]
-)
+np.array([random.uniform(border[0], border[1]) for border in start_params_borders])
 for _ in range(2)
 ]
 tests.append(
@@ -145,10 +124,7 @@
 ]
 ),
 ),
-[
-init_solver(16, 0.1, 0.001, 3, optimizer)
-for optimizer in TESTS_OPTIMIZERS
-],
+[init_solver(16, 0.1, 0.001, 3, optimizer) for optimizer in TESTS_OPTIMIZERS],
 1,
 )
 )

examples/mono_test_generator.py

Lines changed: 4 additions & 13 deletions
@@ -1,7 +1,7 @@
 """Module which contains mixture distributions of single model tests generator"""
 
 import random
-from typing import Iterable
+from collections.abc import Iterable
 
 import numpy as np
 
@@ -42,9 +42,7 @@ def generate_mono_test(
 models: list[AModel] = []
 
 for _ in range(k):
-params = np.array(
-[random.uniform(border[0], border[1]) for border in params_borders]
-)
+params = np.array([random.uniform(border[0], border[1]) for border in params_borders])
 model = model_t()
 x += list(model.generate(params, per_model, normalized=False))
 
@@ -62,12 +60,7 @@ def generate_mono_test(
 samples = random.sample(x, size)
 for _ in range(tests_per_size):
 start_params = [
-np.array(
-[
-random.uniform(border[0], border[1])
-for border in start_params_borders
-]
-)
+np.array([random.uniform(border[0], border[1]) for border in start_params_borders])
 for _ in range(k)
 ]
 tests.append(
@@ -91,9 +84,7 @@ def generate_mono_test(
 model,
 params,
 )
-for model, params in zip(
-models, start_params
-)
+for model, params in zip(models, start_params)
 ]
 ),
 ),
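The only non-cosmetic edit in this file is the import swap at the top: `typing.Iterable` is a deprecated alias, and the class now comes from `collections.abc`. This matches ruff's pyupgrade-derived checks (the rule code UP035 is my identification, not stated in the commit). A minimal sketch of the modernized import in use:

from collections.abc import Iterable


def total(values: Iterable[float]) -> float:
    # Accepts any iterable of floats: list, tuple, generator, ...
    return sum(values)


print(total([0.25, 0.5, 0.25]))  # 1.0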

examples/prepare_result.py

Lines changed: 7 additions & 13 deletions
@@ -21,9 +21,7 @@ def nll(samples: Samples, mixture: MixtureDistribution) -> float:
 return occur
 
 
-def identity_guessing_chance(
-dx: MixtureDistribution, dy: MixtureDistribution, sample: Samples
-):
+def identity_guessing_chance(dx: MixtureDistribution, dy: MixtureDistribution, sample: Samples):
 """Identity guessing chance metric"""
 
 dxs = list(dx)
@@ -64,9 +62,7 @@ def result_to_df_diff(result: SingleSolverResult):
 dct[clicker.click()] = (sp[0], second_sp)
 
 for sp in weibull_start_params:
-for second_sp in np.linspace(
-max(sp[0] - 5, 0.1), sp[0] + 5, num=8, endpoint=True
-):
+for second_sp in np.linspace(max(sp[0] - 5, 0.1), sp[0] + 5, num=8, endpoint=True):
 for _ in sizes:
 for _ in range(tests_per_cond):
 for _ in range(tests_per_size):
@@ -76,7 +72,7 @@ def result_to_df_diff(result: SingleSolverResult):
 [
 (
 d
-if (d.prior_probability is not None) and (d.prior_probability > 0.001)
+if (d.prior_probability is not None) and (d.prior_probability > 0.001) # noqa: PLR2004
 else DistributionInMixture(d.model, d.params, None)
 )
 for d in result.result.content
@@ -100,14 +96,12 @@ def result_to_df_diff(result: SingleSolverResult):
 "time": result.time,
 "model": result.test.true_mixture[0].model.name,
 "size": len(result.test.problem.samples),
-"success": (result.steps < 128) and not failed,
+"success": (result.steps < 128) and not failed, # noqa: PLR2004
 "failed": failed,
 "occur": nll(result.test.all_data, mixture_distribution),
 "start": start,
 "diff": diff,
-"res_err": identity_guessing_chance(
-result.test.true_mixture, result.result.content, result.test.all_data
-),
+"res_err": identity_guessing_chance(result.test.true_mixture, result.result.content, result.test.all_data),
 }
 
 
@@ -131,7 +125,7 @@ def result_to_df(result: SingleSolverResult):
 [
 (
 d
-if (d.prior_probability is not None) and (d.prior_probability > 0.001)
+if (d.prior_probability is not None) and (d.prior_probability > 0.001) # noqa: PLR2004
 else DistributionInMixture(d.model, d.params, None)
 )
 for d in result.result.content
@@ -152,7 +146,7 @@ def result_to_df(result: SingleSolverResult):
 "time": result.time,
 "model": result.test.true_mixture[0].model.name,
 "size": len(result.test.problem.samples),
-"success": (result.steps < 16) and failed,
+"success": (result.steps < 16) and failed, # noqa: PLR2004
 "failed": failed,
 "occur": nll(result.test.all_data, mixture_distribution),
 }
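The `# noqa: PLR2004` comments added above suppress ruff's magic-value-comparison rule for the `0.001`, `128`, and `16` thresholds. The alternative the rule nudges toward is naming the threshold; a small sketch under that assumption (the constant and helper below are illustrative, not part of the repository):

PRIOR_PROBABILITY_THRESHOLD = 0.001  # illustrative name for the magic value being compared


def is_significant(prior_probability: float | None) -> bool:
    # Mirrors the comparison in result_to_df_diff, with the threshold named instead of silenced.
    return prior_probability is not None and prior_probability > PRIOR_PROBABILITY_THRESHOLD


print(is_significant(0.01), is_significant(None))  # True False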

examples/quick_test.py

Lines changed: 2 additions & 7 deletions
@@ -26,9 +26,7 @@ def run_test():
 
 counter = Clicker()
 
-def _generate_test(
-model: type[AModelWithGenerator], params_borders: list[tuple[float, float]]
-) -> list[Test]:
+def _generate_test(model: type[AModelWithGenerator], params_borders: list[tuple[float, float]]) -> list[Test]:
 test = generate_mono_test(
 model_t=model,
 clicker=counter,
@@ -40,10 +38,7 @@ def _generate_test(
 tests_per_size=1,
 tests_per_cond=1,
 runs_per_test=1,
-solvers=[
-init_solver(16, 0.1, 0.001, 3, optimizer)
-for optimizer in ALL_OPTIMIZERS
-],
+solvers=[init_solver(16, 0.1, 0.001, 3, optimizer) for optimizer in ALL_OPTIMIZERS],
 )
 return test

examples/utils.py

Lines changed: 7 additions & 6 deletions
@@ -4,7 +4,7 @@
 import random
 import time
 from functools import partial
-from typing import Callable, NamedTuple
+from typing import ClassVar, NamedTuple
 
 import numpy as np
 from tqdm.contrib.concurrent import process_map
@@ -48,7 +48,7 @@ class SingleSolverResult(NamedTuple):
 steps: int
 time: float
 
-log: list[EM.Log.Item] = []
+log: ClassVar[list[EM.Log.Item]] = []
 
 
 class TestResult(NamedTuple):
@@ -133,7 +133,10 @@ def run_tests(
 )
 
 if shuffled:
-key: Callable[[TestResult], int] = lambda t: t.test.index
+
+def key(t):
+return t.test.index
+
 results.sort(key=key)
 
 return results
@@ -170,8 +173,6 @@ def init_solver(
 return EM(
 breakpointer,
 FiniteChecker()
-+ PriorProbabilityThresholdChecker(
-prior_probability_threshold, prior_probability_threshold_step
-),
++ PriorProbabilityThresholdChecker(prior_probability_threshold, prior_probability_threshold_step),
 method,
 )
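Two lint-driven edits in this file go beyond formatting: the assigned `lambda` becomes a local `def` (the pattern flagged by pycodestyle's E731), and the mutable default on the NamedTuple is annotated with `ClassVar` (the pattern flagged by ruff's RUF012); both rule codes are my identification, not stated in the commit. A self-contained sketch of the sort-key rewrite, with a simplified stand-in for the repository's `TestResult`:

from typing import NamedTuple


class TestResult(NamedTuple):
    # Simplified stand-in: the real type wraps a Test object and the key reads t.test.index.
    index: int


def key(t: TestResult) -> int:
    # Replaces the removed `key = lambda t: t.test.index` (one attribute level dropped here).
    return t.index


results = [TestResult(2), TestResult(0), TestResult(1)]
results.sort(key=key)
print([r.index for r in results])  # [0, 1, 2]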

experimental_env/analysis/analysis.py

Lines changed: 12 additions & 14 deletions
@@ -1,4 +1,5 @@
-""" A module that provides a class for performing the third stage of the experiment """
+"""A module that provides a class for performing the third stage of the experiment"""
+
 from pathlib import Path
 
 from tqdm import tqdm
@@ -33,7 +34,8 @@ def analyze(self, results: ParserOutput, method: str):
 """
 A function for analyzing the method
 
-:param results: The result of the method on the second stage of the experiment, which was obtained using a parser.
+:param results: The result of the method on the second stage of the experiment,
+which was obtained using a parser.
 :param method: Name of analyzed method
 """
 method_dir: Path = self._out_dir.joinpath(method)
@@ -48,9 +50,7 @@ def analyze(self, results: ParserOutput, method: str):
 with tqdm(total=len(exp_descriptions)) as pbar:
 for exp_descr in exp_descriptions:
 pbar.update()
-exp_dir: Path = mixture_dir.joinpath(
-f"experiment_{exp_descr.exp_num}"
-)
+exp_dir: Path = mixture_dir.joinpath(f"experiment_{exp_descr.exp_num}")
 
 for action in self._actions:
 action.set_path(exp_dir)
@@ -66,21 +66,21 @@ def compare(
 """
 A function for comparing the methods
 
-:param results_1: The result of the first method on the second stage of the experiment, which was obtained using a parser.
-:param results_2: The result of the second method on the second stage of the experiment, which was obtained using a parser.
+:param results_1: The result of the first method on the second stage of the experiment,
+which was obtained using a parser.
+:param results_2: The result of the second method on the second stage of the experiment,
+which was obtained using a parser.
 :param method_1: Name of the first method
 :param method_2: Name of the second method
 """
 method_dir = self._out_dir.joinpath(f"{method_1} + {method_2}")
 
-for mixture_name in results_1.keys():
+for mixture_name in results_1:
 mixture_dir = method_dir.joinpath(mixture_name)
 
 for summarizer in self._summarizers:
 summarizer.set_path(mixture_dir)
-summarizer.compare_methods(
-results_1[mixture_name], results_2[mixture_name], method_1, method_2
-)
+summarizer.compare_methods(results_1[mixture_name], results_2[mixture_name], method_1, method_2)
 
 with tqdm(total=len(results_1[mixture_name])) as pbar:
 for res in zip(results_1[mixture_name], results_2[mixture_name]):
@@ -89,6 +89,4 @@ def compare(
 
 for action in self._actions:
 action.set_path(exp_dir)
-action.overall_compare_methods(
-res[0], res[1], method_1, method_2
-)
+action.overall_compare_methods(res[0], res[1], method_1, method_2)
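Besides docstring re-wrapping and line joining, one change above simplifies dict iteration: `for mixture_name in results_1.keys()` becomes `for mixture_name in results_1`, the pattern flagged by ruff's flake8-simplify rules (the rule code SIM118 is my identification, not stated in the commit). A minimal sketch of the equivalence:

results_1 = {"gaussian": [0.1, 0.2], "weibull": [0.3]}  # illustrative data, not from the experiment

for mixture_name in results_1:
    # Iterating a dict yields its keys, so the explicit .keys() call adds nothing.
    print(mixture_name, len(results_1[mixture_name]))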

experimental_env/analysis/analyze_strategies/analysis_strategy.py

Lines changed: 9 additions & 5 deletions
@@ -1,4 +1,4 @@
-""" A module containing an abstract strategy class for analysis methods. """
+"""A module containing an abstract strategy class for analysis methods."""
 # pylint: disable=duplicate-code
 
 from abc import ABC, abstractmethod
@@ -9,7 +9,8 @@
 
 class AnalysisStrategy(ABC):
 """
-An abstract strategy class that contains methods for analysis and comparison, as well as a method for establishing a directory
+An abstract strategy class that contains methods for analysis and comparison,
+as well as a method for establishing a directory
 """
 
 def __init__(self):
@@ -39,7 +40,8 @@ def overall_analyze_method(self, result: ExperimentDescription, method: str):
 """
 Analyze the method result of the experiment
 
-:param result: The result of the method on the second stage of the experiment, which was obtained using a parser.
+:param result: The result of the method on the second stage of the experiment,
+which was obtained using a parser.
 :param method: The name of the method that we are analyzing
 """
 
@@ -56,8 +58,10 @@ def overall_compare_methods(
 """
 A function for comparing the methods by two results, with the same base mixture
 
-:param result_1: The result of the first method on the second stage of the experiment, which was obtained using a parser.
-:param result_2: The result of the second method on the second stage of the experiment, which was obtained using a parser.
+:param result_1: The result of the first method on the second stage of the experiment,
+which was obtained using a parser.
+:param result_2: The result of the second method on the second stage of the experiment,
+which was obtained using a parser.
 :param method_1: Name of the first method
 :param method_2: Name of the second method

experimental_env/analysis/analyze_strategies/density_plot.py

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-""" A module providing a class for saving a distribution density graph. """
+"""A module providing a class for saving a distribution density graph."""
 
 import matplotlib.pyplot as plt
 import numpy as np
