 - `MULTI_LABEL`
 - `OTHER_LABEL`
   They must be used as `label` for spike recorders and multimeters, respectively,
-  or for other files for output data (CSV files). They are format strings expecting
-  the number of processes with which NEST is run as argument.
+  or for other files for output data (TAB-separated CSV files). They are format
+  strings expecting the number of processes with which NEST is run as argument.
 - Set `debug=True` on the decorator to see debug output and keep the
   temporary directory that has been created (latter works only in
   Python 3.12 and later)
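
For illustration, here is a minimal sketch of how a test module might use one of these label format strings; the `SPIKE_LABEL` value and the recorder parameters below are assumptions based on the docstring and the standard PyNEST API, not part of this diff.

    # Hypothetical test snippet: fill the label format string with the number of
    # MPI processes and pass it to a spike recorder that writes TAB-separated ASCII files.
    import nest

    SPIKE_LABEL = "spike-{}"  # assumed value; any format string taking the process count works

    def build_recorder():
        return nest.Create(
            "spike_recorder",
            params={
                "record_to": "ascii",
                "label": SPIKE_LABEL.format(nest.NumProcesses()),
            },
        )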
@@ -239,10 +239,13 @@ def _collect_result_by_label(self, tmpdirpath, label):
         label += "-{}.dat"

         try:
-            next(tmpdirpath.glob(label.format("*", "*")))
+            first_file = next(tmpdirpath.glob(label.format("*", "*")))
         except StopIteration:
             return None  # no data for this label

+        # Confirm we have tab-separated data. Assumes that all data have at least two columns.
+        assert "\t" in open(first_file).read(), "All data files must be tab-separated"
+
         res = {}
         for n_procs in self._procs_lst:
             data = []
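
As a side note to the assertion added above, such a TAB-separated .dat file can be loaded for inspection roughly like this; the file name, the "#" comment prefix, and the columns are assumptions for illustration only.

    import pandas as pd

    # "#" is assumed to mark header/metadata lines written by the recording backend.
    df = pd.read_csv("spike-2-0.dat", sep="\t", comment="#")
    print(df.columns.tolist())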
@@ -255,6 +258,18 @@ def _collect_result_by_label(self, tmpdirpath, label):

         return res

+    @staticmethod
+    def _drop_empty_dataframes(data):
+        """
+        Return list of non-empty dataframes in data.
+
+        The data frames collected for a given number of processes may contain empty
+        dataframes. pandas.concat() will stop supporting them in a future release, so
+        we filter them out for tests that use concat().
+        """
+
+        return [df for df in data if not df.empty]
+
     def collect_results(self, tmpdirpath):
         """
         For each of the result types, build a dictionary mapping number of MPI procs to a list of
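
The new `_drop_empty_dataframes` helper exists because concatenating empty DataFrames is deprecated in pandas and dropping them lets dtype inference rely only on frames that actually carry data. A standalone sketch of the pattern, with made-up data:

    import pandas as pd

    frames = [
        pd.DataFrame({"sender": [1, 2], "time_step": [10, 11]}),
        pd.DataFrame(),  # e.g. a rank or VP that recorded nothing
    ]

    # Same filtering as _drop_empty_dataframes: keep only non-empty frames.
    non_empty = [df for df in frames if not df.empty]
    combined = pd.concat(non_empty, ignore_index=True)
    print(combined.dtypes)  # dtypes inferred from the non-empty frame only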
@@ -280,36 +295,52 @@ def assert_correct_results(self, tmpdirpath):
         all_res = []
         if self._spike:
             # For each number of procs, combine results across VPs and sort by time and sender
+
+            # Drop empty dataframes so that dtypes are inferred from frames that contain data;
+            # concatenating empty frames is deprecated in pandas (see _drop_empty_dataframes).
             all_res.append(
                 [
-                    pd.concat(spikes, ignore_index=True).sort_values(
+                    pd.concat(self._drop_empty_dataframes(spikes), ignore_index=True).sort_values(
                         by=["time_step", "time_offset", "sender"], ignore_index=True
                     )
                     for spikes in self._spike.values()
                 ]
             )

         if self._multi:
-            raise NotImplementedError("MULTI is not ready yet")
+            # For each number of procs, combine results across VPs and sort by time and sender
+            # Drop empty dataframes so that dtypes are inferred from frames that contain data;
+            # concatenating empty frames is deprecated in pandas (see _drop_empty_dataframes).
+            all_res.append(
+                [
+                    pd.concat(self._drop_empty_dataframes(mmdata), ignore_index=True).sort_values(
+                        by=["time_step", "time_offset", "sender"], ignore_index=True
+                    )
+                    for mmdata in self._multi.values()
+                ]
+            )

         if self._other:
             # For each number of procs, combine across ranks or VPs (depends on what test has written) and
             # sort by all columns so that if results for different proc numbers are equal up to a permutation
             # of rows, the sorted frames will compare equal
+            # Drop empty dataframes so that dtypes are inferred from frames that contain data;
+            # concatenating empty frames is deprecated in pandas (see _drop_empty_dataframes).

             # next(iter(...)) returns the first value in the _other dictionary
             # [0] then picks the first DataFrame from that list
             # columns need to be converted to list() to be passed to sort_values()
             all_columns = list(next(iter(self._other.values()))[0].columns)
             all_res.append(
                 [
-                    pd.concat(others, ignore_index=True).sort_values(by=all_columns, ignore_index=True)
+                    pd.concat(self._drop_empty_dataframes(others), ignore_index=True).sort_values(
+                        by=all_columns, ignore_index=True
+                    )
                     for others in self._other.values()
                 ]
             )

         assert all_res, "No test data collected"
-
         for res in all_res:
             assert len(res) == len(self._procs_lst), "Could not collect data for all procs"

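
The OTHER branch sorts each combined frame by all of its columns so that results which agree only up to a permutation of rows still compare equal. A small sketch of that comparison idea with made-up data:

    import pandas as pd

    a = pd.DataFrame({"sender": [2, 1], "weight": [0.5, 1.5]})
    b = pd.DataFrame({"sender": [1, 2], "weight": [1.5, 0.5]})  # same rows, different order

    cols = list(a.columns)
    a_sorted = a.sort_values(by=cols, ignore_index=True)
    b_sorted = b.sort_values(by=cols, ignore_index=True)

    # Passes: the frames are equal up to row permutation.
    pd.testing.assert_frame_equal(a_sorted, b_sorted)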