 
 import browser_common
 import common
+from color_runner import BufferingMixin
 from common import errlog
 
 from tools import emprofile, utils
-from tools.colored_logger import CYAN, GREEN, RED, with_color
 from tools.utils import WINDOWS
 
 EMTEST_VISUALIZE = os.getenv('EMTEST_VISUALIZE')
@@ -66,6 +66,10 @@ def run_test(args):
   test.set_temp_dir(temp_dir)
   try:
     test(result)
+  except KeyboardInterrupt:
+    # In case of KeyboardInterrupt do not emit buffered stderr/stdout
+    # as we unwind.
+    result._mirrorOutput = False
   finally:
     result.elapsed = time.perf_counter() - start_time
 
@@ -106,43 +110,11 @@ def __init__(self, max_cores, options):
     self.max_cores = max_cores
     self.max_failures = options.max_failures
     self.failing_and_slow_first = options.failing_and_slow_first
-    self.progress_counter = 0
 
   def addTest(self, test):
     super().addTest(test)
     test.is_parallel = True
 
-  def printOneResult(self, res):
-    self.progress_counter += 1
-    progress = f'[{self.progress_counter}/{self.num_tests}] '
-
-    if res.test_result == 'success':
-      msg = 'ok'
-      color = GREEN
-    elif res.test_result == 'errored':
-      msg = 'ERROR'
-      color = RED
-    elif res.test_result == 'failed':
-      msg = 'FAIL'
-      color = RED
-    elif res.test_result == 'skipped':
-      reason = res.skipped[0][1]
-      msg = f"skipped '{reason}'"
-      color = CYAN
-    elif res.test_result == 'unexpected success':
-      msg = 'unexpected success'
-      color = RED
-    elif res.test_result == 'expected failure':
-      color = RED
-      msg = 'expected failure'
-    else:
-      assert False, f'unhandled test result {res.test_result}'
-
-    if res.test_result != 'skipped':
-      msg += f' ({res.elapsed:.2f}s)'
-
-    errlog(f'{with_color(CYAN, progress)}{res.test} ... {with_color(color, msg)}')
-
   def run(self, result):
     # The 'spawn' method is used on windows and it can be useful to set this on
     # all platforms when debugging multiprocessing issues. Without this we
@@ -151,6 +123,12 @@ def run(self, result):
     # issues.
     # multiprocessing.set_start_method('spawn')
 
+    # No need to worry about stdout/stderr buffering since we are not
+    # actually running the test here, only setting the results.
+    buffer = result.buffer
+    result.buffer = False
+
+    result.core_time = 0
     tests = self.get_sorted_tests()
     self.num_tests = self.countTestCases()
     contains_browser_test = any(test.is_browser_test() for test in tests)
@@ -176,23 +154,23 @@ def run(self, result):
       allowed_failures_counter = manager.Value('i', self.max_failures)
 
       results = []
-      args = ((t, allowed_failures_counter, result.buffer) for t in tests)
+      args = ((t, allowed_failures_counter, buffer) for t in tests)
       for res in pool.imap_unordered(run_test, args, chunksize=1):
         # results may be be None if # of allowed errors was exceeded
         # and the harness aborted.
         if res:
           if res.test_result not in ['success', 'skipped'] and allowed_failures_counter is not None:
             # Signal existing multiprocess pool runners so that they can exit early if needed.
             allowed_failures_counter.value -= 1
-          self.printOneResult(res)
+          res.integrate_result(result)
           results.append(res)
 
       # Send a task to each worker to tear down the browser and server. This
       # relies on the implementation detail in the worker pool that all workers
       # are cycled through once.
       num_tear_downs = sum([pool.apply(tear_down, ()) for i in range(use_cores)])
       # Assert the assumed behavior above hasn't changed.
-      if num_tear_downs != use_cores:
+      if num_tear_downs != use_cores and not buffer:
         errlog(f'Expected {use_cores} teardowns, got {num_tear_downs}')
 
     if self.failing_and_slow_first:
@@ -218,7 +196,9 @@ def update_test_results_to(test_name):
 
       json.dump(previous_test_run_results, open(common.PREVIOUS_TEST_RUN_RESULTS_FILE, 'w'), indent=2)
 
-    return self.combine_results(result, results)
+    if EMTEST_VISUALIZE:
+      self.visualize_results(results)
+    return result
 
   def get_sorted_tests(self):
     """A list of this suite's tests, sorted with the @is_slow_test tests first.
@@ -237,45 +217,29 @@ def test_key(test):
 
     return sorted(self, key=test_key, reverse=True)
 
-  def combine_results(self, result, buffered_results):
-    errlog('')
-    errlog('DONE: combining results on main thread')
-    errlog('')
+  def visualize_results(self, results):
+    assert EMTEST_VISUALIZE
     # Sort the results back into alphabetical order. Running the tests in
     # parallel causes mis-orderings, this makes the results more readable.
-    results = sorted(buffered_results, key=lambda res: str(res.test))
-    result.core_time = 0
+    results = sorted(results, key=lambda res: str(res.test))
 
     # shared data structures are hard in the python multi-processing world, so
     # use a file to share the flaky test information across test processes.
     flaky_tests = open(common.flaky_tests_log_filename).read().split() if os.path.isfile(common.flaky_tests_log_filename) else []
     # Extract only the test short names
     flaky_tests = [x.split('.')[-1] for x in flaky_tests]
 
-    # The next integrateResult loop will print a *lot* of lines really fast. This
-    # will cause a Python exception being thrown when attempting to print to
-    # stderr, if stderr is in nonblocking mode, like it is on Buildbot CI:
-    # See https://github.com/buildbot/buildbot/issues/8659
-    # To work around that problem, set stderr to blocking mode before printing.
-    if not WINDOWS:
-      os.set_blocking(sys.stderr.fileno(), True)
-
     for r in results:
-      # Integrate the test result to the global test result object
-      r.integrateResult(result)
       r.log_test_run_for_visualization(flaky_tests)
 
     # Generate the parallel test run visualization
-    if EMTEST_VISUALIZE:
-      emprofile.create_profiling_graph(utils.path_from_root('out/graph'))
-      # Cleanup temp files that were used for the visualization
-      emprofile.delete_profiler_logs()
-      utils.delete_file(common.flaky_tests_log_filename)
-
-    return result
+    emprofile.create_profiling_graph(utils.path_from_root('out/graph'))
+    # Cleanup temp files that were used for the visualization
+    emprofile.delete_profiler_logs()
+    utils.delete_file(common.flaky_tests_log_filename)
 
 
-class BufferedParallelTestResult(unittest.TestResult):
+class BufferedParallelTestResult(BufferingMixin, unittest.TestResult):
   """A picklable struct used to communicate test results across processes
   """
   def __init__(self):
@@ -293,15 +257,12 @@ def test_short_name(self):
   def addDuration(self, test, elapsed):
     self.test_duration = elapsed
 
-  def integrateResult(self, overall_results):
+  def integrate_result(self, overall_results):
     """This method get called on the main thread once the buffered result
-    is received. It add the buffered result to the overall result."""
+    is received. It adds the buffered result to the overall result."""
     # The exception info objects that we are adding here have already
     # been turned into strings so make _exc_info_to_string into a no-op.
     overall_results._exc_info_to_string = lambda x, _y: x
-    # No need to worry about stdout/stderr buffering since are a not
-    # actually running the test here, only setting the results.
-    overall_results.buffer = False
     overall_results.startTest(self.test)
     if self.test_result == 'success':
       overall_results.addSuccess(self.test)
@@ -321,7 +282,8 @@ def integrateResult(self, overall_results):
     overall_results.core_time += self.test_duration
 
   def log_test_run_for_visualization(self, flaky_tests):
-    if EMTEST_VISUALIZE and (self.test_result != 'skipped' or self.test_duration > 0.2):
+    assert EMTEST_VISUALIZE
+    if self.test_result != 'skipped' or self.test_duration > 0.2:
       test_result = self.test_result
       if test_result == 'success' and self.test_short_name() in flaky_tests:
         test_result = 'warnings'