3333 "GITHUB_REPOSITORY" , "faster-cpython/bench_runner"
3434)
3535# Environment variables that control the execution of CPython
36- ENV_VARS = ["PYTHON_JIT" ]
36+ ENV_VARS = ["PYTHON_JIT" , "PYPERF_PERF_RECORD_EXTRA_OPTS" ]
3737
3838
3939class NoBenchmarkError (Exception ):
@@ -64,7 +64,7 @@ def get_benchmark_names(benchmarks: str) -> list[str]:
 def run_benchmarks(
     python: PathLike,
     benchmarks: str,
-    command_prefix: Iterable[str] | None = None,
+    /,
     test_mode: bool = False,
     extra_args: Iterable[str] | None = None,
 ) -> None:
@@ -74,9 +74,6 @@ def run_benchmarks(
     if BENCHMARK_JSON.is_file():
         BENCHMARK_JSON.unlink()
 
-    if command_prefix is None:
-        command_prefix = []
-
     if test_mode:
         fast_arg = ["--fast"]
     else:
@@ -86,7 +83,6 @@ def run_benchmarks(
         extra_args = []
 
     args = [
-        *command_prefix,
         sys.executable,
         "-m",
         "pyperformance",
@@ -173,19 +169,36 @@ def collect_pystats(
     run_summarize_stats(python, fork, ref, "all", benchmark_links, flags=flags)
 
 
-def perf_to_csv(lines: Iterable[str], output: PathLike):
-    event_count_prefix = "# Event count (approx.): "
-    total = None
+def get_perf_lines(files: Iterable[PathLike]) -> Iterable[str]:
+    for filename in files:
+        p = subprocess.Popen(
+            [
+                "perf",
+                "report",
+                "--stdio",
+                "-g",
+                "none",
+                "--show-total-period",
+                "-s",
+                "pid,symbol,dso",
+                "-i",
+                str(filename),
+            ],
+            encoding="utf-8",
+            stdout=subprocess.PIPE,
+            bufsize=1,
+        )
+        assert p.stdout is not None  # for pyright
+        yield from iter(p.stdout.readline, "")
+        p.kill()
+
 
+def perf_to_csv(lines: Iterable[str], output: PathLike):
     rows = []
     for line in lines:
         line = line.strip()
-        if line.startswith(event_count_prefix):
-            total = int(line[len(event_count_prefix) :].strip())
-        elif line.startswith("#") or line == "":
+        if line.startswith("#") or line == "":
             pass
-        elif total is None:
-            raise ValueError("Could not find total sample count")
         else:
             _, period, command, _, symbol, shared, _ = line.split(maxsplit=6)
             pid, command = command.split(":")
@@ -209,47 +222,28 @@ def collect_perf(python: PathLike, benchmarks: str):
         shutil.rmtree(PROFILING_RESULTS)
     PROFILING_RESULTS.mkdir()
 
-    perf_data = Path("perf.data")
+    perf_data_glob = "perf.data.*"
     for benchmark in all_benchmarks:
-        if perf_data.exists():
-            perf_data.unlink()
-
-        try:
-            run_benchmarks(
-                python,
-                benchmark,
-                command_prefix=[
-                    "perf",
-                    "record",
-                    "-o",
-                    "perf.data",
-                    "--",
-                ],
+        for filename in Path(".").glob(perf_data_glob):
+            filename.unlink()
+
+        run_benchmarks(
+            python,
+            benchmark,
+            extra_args=["--hook", "perf_record"],
+        )
+
+        fileiter = Path(".").glob(perf_data_glob)
+        if util.has_any_element(fileiter):
+            perf_to_csv(
+                get_perf_lines(fileiter),
+                PROFILING_RESULTS / f"{benchmark}.perf.csv",
             )
-        except NoBenchmarkError:
-            pass
         else:
-            if perf_data.exists():
-                output = subprocess.check_output(
-                    [
-                        "perf",
-                        "report",
-                        "--stdio",
-                        "-g",
-                        "none",
-                        "--show-total-period",
-                        "-s",
-                        "pid,symbol,dso",
-                        "-i",
-                        "perf.data",
-                    ],
-                    encoding="utf-8",
-                )
-                perf_to_csv(
-                    output.splitlines(), PROFILING_RESULTS / f"{benchmark}.perf.csv"
-                )
-            else:
-                print(f"No perf.data file generated for {benchmark}", file=sys.stderr)
+            print(f"No perf.data files generated for {benchmark}", file=sys.stderr)
+
+    for filename in Path(".").glob(perf_data_glob):
+        filename.unlink()
 
 
 def update_metadata(
@@ -381,7 +375,7 @@ def _main(
     benchmarks = select_benchmarks(benchmarks)
 
     if mode == "benchmark":
-        run_benchmarks(python, benchmarks, [], test_mode)
+        run_benchmarks(python, benchmarks, test_mode=test_mode)
         update_metadata(BENCHMARK_JSON, fork, ref, run_id=run_id)
         copy_to_directory(BENCHMARK_JSON, python, fork, ref, flags)
     elif mode == "perf":