Skip to content

Commit b8c56f0

Browse files
committed
better org/3
1 parent bd44649 commit b8c56f0

File tree

1 file changed

+31
-11
lines changed

1 file changed

+31
-11
lines changed

flarestack/core/results.py

Lines changed: 31 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -123,20 +123,33 @@ def merge_and_load(self, output_dict: dict):
123123
background_label = scale_shortener(0.0)
124124

125125
for subdir_name in scales_subdirs:
126-
scale_label = scale_shortener(float(subdir_name))
126+
try:
127+
scale_label = scale_shortener(float(subdir_name))
128+
except ValueError as e:
129+
# If analysis paths are nested (e.g. analyses ana1 and ana1/sub1), the ana1/sub1 directory
# will also be scanned and must be skipped here. Users should ideally avoid nesting analysis
# directories, but there is currently no safeguard against it.
130+
logger.debug(
131+
f"Skipping subdirectory {subdir_name} as it does not represent a valid scale. Parent directory: {self.path}"
132+
)
133+
continue
127134

128135
if self.background_only and scale_label != background_label:
129-
# skip non-background trials
136+
# skip non-background trials for background_only mode
130137
continue
131138

132139
pending_data = self.merge_and_load_subdir(subdir_name)
133140

134141
if pending_data:
142+
n_pending = len(pending_data["TS"])
143+
135144
if scale_label == background_label and background_label in output_dict:
136-
logger.info("Appending background data to existing trials.")
145+
logger.info(f"Appending {n_pending} background trials to {len(output_dict[background_label]['TS'])} existing trials ({scale_label=})")
137146
self.merge_datadict(output_dict[background_label], pending_data)
138147
else:
139148
output_dict[scale_label] = pending_data
149+
if self.background_only:
150+
logger.info(
151+
f"Loading {n_pending} background trials ({scale_label=})"
152+
)
140153

141154

142155
class ResultsHandler(object):
@@ -153,10 +166,7 @@ def __init__(
153166

154167
self.name = rh_dict["name"]
155168

156-
if background_source is not None:
157-
self.background_from = background_source
158-
else:
159-
self.background_from = rh_dict["name"]
169+
self.background_source = background_source
160170

161171
self.mh_name = rh_dict["mh_name"]
162172

@@ -170,10 +180,11 @@ def __init__(
170180
self.results = dict()
171181

172182
self.pickle_output_dir = name_pickle_output_dir(self.name)
173-
self.pickle_output_dir_bg = name_pickle_output_dir(self.background_from)
183+
self.pickle_output_dir_bg = name_pickle_output_dir(self.background_source) if self.background_source else None
174184

175185
self.pickle_cache = PickleCache(Path(self.pickle_output_dir))
176-
self.pickle_cache_bg = PickleCache(Path(self.pickle_output_dir_bg))
186+
187+
self.pickle_cache_bg = PickleCache(Path(self.pickle_output_dir_bg), background_only=True) if self.background_source else None
177188

178189
self.plot_path = Path(plot_output_dir(self.name))
179190

@@ -228,13 +239,21 @@ def __init__(
228239
try:
229240
self.pickle_cache.merge_and_load(output_dict=self.results)
230241
# Load the background trials. Will override the existing one.
231-
self.pickle_cache_bg.merge_and_load(output_dict=self.results)
242+
if self.pickle_cache_bg is not None:
243+
print("NOTE!!!! Loading BG")
244+
self.pickle_cache_bg.merge_and_load(output_dict=self.results)
245+
else:
246+
print("NOTE!!!! No BG pickle cache")
232247
if not self.results:
233248
logger.warning("No data was found by ResultsHandler object! \n")
234249
logger.warning(
235250
"Tried root directory: \n {0} \n ".format(self.pickle_output_dir)
236251
)
237252
sys.exit()
253+
if not scale_shortener(0.0) in self.results:
254+
logger.error(f"No key equal to '0' in results! Keys are {self.results.keys()}")
255+
256+
sys.exit()
238257

239258
except FileNotFoundError:
240259
logger.warning(f"No files found at {self.pickle_output_dir}")
@@ -726,7 +745,7 @@ def find_disc_potential(self):
726745
)
727746

728747
logger.info(
729-
f"Scale: {scale}, TS_threshold: {disc_threshold}, n_trials: {len(ts_array)} => overfluctuations {frac=}"
748+
f"Scale: {scale}, TS_threshold: {disc_threshold:.1f}, n_trials: {len(ts_array)} => overfluctuations {frac=:.4f}"
730749
)
731750

732751
y[zval].append(frac)
@@ -1011,6 +1030,7 @@ def plot_bias(self):
10111030
for scale in raw_x:
10121031
vals = self.results[scale]["Parameters"][param]
10131032

1033+
10141034
if self.bias_error == "std":
10151035
med = np.median(vals)
10161036
meds.append(med)

0 commit comments

Comments
 (0)