 from tools.colored_logger import CYAN, GREEN, RED, with_color
 from tools.utils import WINDOWS
 
+EMTEST_VISUALIZE = os.getenv('EMTEST_VISUALIZE')
 NUM_CORES = None
 seen_class = set()
 torn_down = False
@@ -43,7 +44,8 @@ def cap_max_workers_in_pool(max_workers, is_browser):
   return max_workers
 
 
-def run_test(test, allowed_failures_counter, lock, progress_counter, num_tests, buffer):
+def run_test(args):
+  test, allowed_failures_counter, lock, buffer = args
   # If we have exceeded the number of allowed failures during the test run,
   # abort executing further tests immediately.
   if allowed_failures_counter and allowed_failures_counter.value < 0:
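
Note: `run_test` now takes a single tuple because `Pool.imap_unordered` (used later in this change) calls the worker with one argument per item, whereas `starmap` unpacked each tuple into positional arguments. A minimal sketch of the two calling conventions, with hypothetical names:

```python
import multiprocessing

def star_style(name, factor):    # starmap unpacks each tuple into args
  return name * factor

def single_style(args):          # imap_unordered passes the item as-is
  name, factor = args
  return name * factor

if __name__ == '__main__':
  items = [('a', 2), ('b', 3)]
  with multiprocessing.Pool(2) as pool:
    print(pool.starmap(star_style, items))                   # ['aa', 'bbb']
    print(sorted(pool.imap_unordered(single_style, items)))  # same results, any order
```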
@@ -56,38 +58,6 @@ def test_failed():
 
   start_time = time.perf_counter()
 
-  def compute_progress():
-    if not lock:
-      return ''
-    with lock:
-      val = f'[{int(progress_counter.value * 100 / num_tests)}%] '
-      progress_counter.value += 1
-      return with_color(CYAN, val)
-
-  def printResult(res):
-    elapsed = time.perf_counter() - start_time
-    progress = compute_progress()
-    if res.test_result == 'success':
-      msg = f'ok ({elapsed:.2f}s)'
-      errlog(f'{progress}{res.test} ... {with_color(GREEN, msg)}')
-    elif res.test_result == 'errored':
-      msg = f'{res.test} ... ERROR'
-      errlog(f'{progress}{with_color(RED, msg)}')
-    elif res.test_result == 'failed':
-      msg = f'{res.test} ... FAIL'
-      errlog(f'{progress}{with_color(RED, msg)}')
-    elif res.test_result == 'skipped':
-      msg = f"skipped '{res.buffered_result.reason}'"
-      errlog(f"{progress}{res.test} ... {with_color(CYAN, msg)}")
-    elif res.test_result == 'unexpected success':
-      msg = f'unexpected success ({elapsed:.2f}s)'
-      errlog(f'{progress}{res.test} ... {with_color(RED, msg)}')
-    elif res.test_result == 'expected failure':
-      msg = f'expected failure ({elapsed:.2f}s)'
-      errlog(f'{progress}{res.test} ... {with_color(RED, msg)}')
-    else:
-      assert False
-
   olddir = os.getcwd()
   result = BufferedParallelTestResult()
   result.start_time = start_time
@@ -109,12 +79,19 @@ def printResult(res):
     result.addError(test, e)
     test_failed()
   finally:
-    printResult(result)
+    result.elapsed = time.perf_counter() - start_time
 
     # Before attempting to delete the tmp dir make sure the current
     # working directory is not within it.
     os.chdir(olddir)
     common.force_delete_dir(temp_dir)
+
+    # Since we are returning this result to the main thread we need to make sure
+    # that it is serializable/picklable. To do this, we delete any non-picklable
+    # fields from the instance.
+    del result._original_stdout
+    del result._original_stderr
+
   return result
 
 
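
Results returned from a pool worker travel back to the parent process over a pipe, so the returned object must survive `pickle`; open stream wrappers like the buffered stdout/stderr handles do not, which is why they are deleted above before returning. A small self-contained illustration (the `Holder` class is hypothetical):

```python
import pickle
import sys

class Holder:
  pass

h = Holder()
h.data = 'fine'
h.stream = sys.stdout  # an _io.TextIOWrapper, which pickle rejects

try:
  pickle.dumps(h)
except TypeError as e:
  print('cannot pickle:', e)

del h.stream      # drop the unpicklable field, as run_test() does
pickle.dumps(h)   # now succeeds
```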
@@ -141,11 +118,44 @@ def __init__(self, max_cores, options):
     self.max_cores = max_cores
     self.max_failures = options.max_failures
     self.failing_and_slow_first = options.failing_and_slow_first
+    self.progress_counter = 0
 
   def addTest(self, test):
     super().addTest(test)
     test.is_parallel = True
 
+  def printOneResult(self, res):
+    percent = int(self.progress_counter * 100 / self.num_tests)
+    progress = f'[{percent:2d}%] '
+    self.progress_counter += 1
+
+    if res.test_result == 'success':
+      msg = 'ok'
+      color = GREEN
+    elif res.test_result == 'errored':
+      msg = 'ERROR'
+      color = RED
+    elif res.test_result == 'failed':
+      msg = 'FAIL'
+      color = RED
+    elif res.test_result == 'skipped':
+      reason = res.skipped[0][1]
+      msg = f"skipped '{reason}'"
+      color = CYAN
+    elif res.test_result == 'unexpected success':
+      msg = 'unexpected success'
+      color = RED
+    elif res.test_result == 'expected failure':
+      color = RED
+      msg = 'expected failure'
+    else:
+      assert False, f'unhandled test result {res.test_result}'
+
+    if res.test_result != 'skipped':
+      msg += f' ({res.elapsed:.2f}s)'
+
+    errlog(f'{with_color(CYAN, progress)}{res.test} ... {with_color(color, msg)}')
+
   def run(self, result):
     # The 'spawn' method is used on windows and it can be useful to set this on
     # all platforms when debugging multiprocessing issues. Without this we
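
Since results are now printed in the parent process (see `printOneResult` above), the progress counter can be a plain instance attribute. The removed code needed the heavier pattern sketched below, a manager-backed value guarded by a lock, because multiple worker processes incremented it concurrently:

```python
import multiprocessing

def bump(args):
  counter, lock = args
  with lock:                 # guard the read-modify-write across processes
    counter.value += 1

if __name__ == '__main__':
  manager = multiprocessing.Manager()
  counter = manager.Value('i', 0)
  lock = manager.Lock()
  with multiprocessing.Pool(4) as pool:
    pool.map(bump, [(counter, lock)] * 10)
  print(counter.value)  # 10
```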
@@ -155,6 +165,7 @@ def run(self, result):
     # multiprocessing.set_start_method('spawn')
 
     tests = self.get_sorted_tests()
+    self.num_tests = len(tests)
     contains_browser_test = any(test.is_browser_test() for test in tests)
     use_cores = cap_max_workers_in_pool(min(self.max_cores, len(tests), num_cores()), contains_browser_test)
     errlog(f'Using {use_cores} parallel test processes')
@@ -170,15 +181,23 @@ def run(self, result):
     if python_multiprocessing_structures_are_buggy():
       # When multiprocessing shared structures are buggy we don't support failfast
       # or the progress bar.
-      allowed_failures_counter = progress_counter = lock = None
+      allowed_failures_counter = lock = None
       if self.max_failures < 2 ** 31 - 1:
         errlog('The version of python being used is not compatible with --failfast and --max-failures options. See https://github.com/python/cpython/issues/71936')
         sys.exit(1)
     else:
       allowed_failures_counter = manager.Value('i', self.max_failures)
-      progress_counter = manager.Value('i', 0)
       lock = manager.Lock()
-    results = pool.starmap(run_test, ((t, allowed_failures_counter, lock, progress_counter, len(tests), result.buffer) for t in tests), chunksize=1)
+
+    results = []
+    args = ((t, allowed_failures_counter, lock, result.buffer) for t in tests)
+    for res in pool.imap_unordered(run_test, args, chunksize=1):
+      # Results may be None if the number of allowed errors was exceeded
+      # and the harness aborted.
+      if res:
+        self.printOneResult(res)
+        results.append(res)
+
     # Send a task to each worker to tear down the browser and server. This
     # relies on the implementation detail in the worker pool that all workers
     # are cycled through once.
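
The switch to `imap_unordered` is what enables the live progress output: `starmap` blocks until every task has finished and then returns the full list, while `imap_unordered` yields each result as soon as some worker completes it. A minimal sketch of the streaming pattern, with hypothetical names:

```python
import multiprocessing
import time

def slow_double(n):
  time.sleep(n * 0.1)
  return n * 2

if __name__ == '__main__':
  with multiprocessing.Pool(3) as pool:
    done = 0
    for res in pool.imap_unordered(slow_double, [3, 1, 2], chunksize=1):
      done += 1
      print(f'[{done}/3] got {res}')  # printed as results arrive, fastest first
```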
@@ -187,9 +206,6 @@ def run(self, result):
     if num_tear_downs != use_cores:
       errlog(f'Expected {use_cores} teardowns, got {num_tear_downs}')
 
-    # Filter out the None results which can occur if # of allowed errors was exceeded and the harness aborted.
-    results = [r for r in results if r is not None]
-
     if self.failing_and_slow_first:
       previous_test_run_results = common.load_previous_test_run_results()
       for r in results:
@@ -263,7 +279,7 @@ def combine_results(self, result, buffered_results):
       r.updateResult(result)
 
     # Generate the parallel test run visualization
-    if os.getenv('EMTEST_VISUALIZE'):
+    if EMTEST_VISUALIZE:
       emprofile.create_profiling_graph(utils.path_from_root('out/graph'))
       # Cleanup temp files that were used for the visualization
       emprofile.delete_profiler_logs()
@@ -302,7 +318,7 @@ def updateResult(self, result):
     self.log_test_run_for_visualization()
 
   def log_test_run_for_visualization(self):
-    if os.getenv('EMTEST_VISUALIZE') and (self.test_result != 'skipped' or self.test_duration > 0.2):
+    if EMTEST_VISUALIZE and (self.test_result != 'skipped' or self.test_duration > 0.2):
       profiler_logs_path = os.path.join(tempfile.gettempdir(), 'emscripten_toolchain_profiler_logs')
       os.makedirs(profiler_logs_path, exist_ok=True)
       profiler_log_file = os.path.join(profiler_logs_path, 'toolchain_profiler.pid_0.json')
@@ -319,7 +335,7 @@ def log_test_run_for_visualization(self):
       # block, so generate one on the fly.
       dummy_test_task_counter = os.path.getsize(profiler_log_file) if os.path.isfile(profiler_log_file) else 0
       # Remove the redundant 'test_' prefix from each test, since character space is at a premium in the visualized graph.
-      test_name = self.test_short_name().removeprefix('test_')
+      test_name = utils.removeprefix(self.test_short_name(), 'test_')
       with open(profiler_log_file, 'a') as prof:
         prof.write(f',\n{{"pid":{dummy_test_task_counter},"op":"start","time":{self.start_time},"cmdLine":["{test_name}"],"color":"{colors[self.test_result]}"}}')
         prof.write(f',\n{{"pid":{dummy_test_task_counter},"op":"exit","time":{self.start_time + self.test_duration},"returncode":0}}')
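
`str.removeprefix` only exists on Python 3.9+, so the change swaps it for a helper in `tools/utils.py`. A plausible sketch of such a helper, assuming a straightforward implementation (the actual one may differ):

```python
# Hypothetical version of utils.removeprefix() for interpreters older
# than Python 3.9, where str.removeprefix() is unavailable.
def removeprefix(string, prefix):
  if string.startswith(prefix):
    return string[len(prefix):]
  return string

assert removeprefix('test_foo', 'test_') == 'foo'
assert removeprefix('foo', 'test_') == 'foo'  # no prefix: returned unchanged
```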
@@ -333,11 +349,6 @@ def stopTest(self, test):
     # TODO(sbc): figure out a way to display this duration information again when
     # these results get passed back to the TextTestRunner/TextTestResult.
     self.buffered_result.duration = self.test_duration
-    # Once we are done running the test and any stdout/stderr buffering has
-    # being taking care or, we delete these fields which the parent class uses.
-    # This is because they are not picklable (serializable).
-    del self._original_stdout
-    del self._original_stderr
 
   def addSuccess(self, test):
     super().addSuccess(test)