
Commit b7e45d2

axel22 authored and Andrija Kolic committed
[GR-70656] [GR-70587] Add custom BenchmarkDispatcher implementations for stable PolyBench runs
PullRequest: graal/22378
2 parents 42dec2e + c31b5e6 commit b7e45d2

8 files changed: +881, -410 lines changed


common.json

Lines changed: 1 addition & 1 deletion
@@ -4,7 +4,7 @@
     "Jsonnet files should not include this file directly but use ci/common.jsonnet instead."
   ],

-  "mx_version": "7.65.3",
+  "mx_version": "7.67.0",

   "COMMENT.jdks": "When adding or removing JDKs keep in sync with JDKs in ci/common.jsonnet",
   "jdks": {

sdk/mx.sdk/mx_sdk_benchmark.py

Lines changed: 18 additions & 10 deletions
@@ -72,7 +72,7 @@
 import mx_sdk_vm_impl
 import mx_util
 from mx_util import Stage, StageName, Layer
-from mx_benchmark import DataPoints, DataPoint, BenchmarkSuite, Vm, SingleBenchmarkExecutionContext, ForkInfo
+from mx_benchmark import DataPoints, DataPoint, BenchmarkSuite, bm_exec_context, ConstantContextValueManager, SingleBenchmarkManager
 from mx_sdk_vm_impl import svm_experimental_options

 _suite = mx.suite('sdk')
@@ -1786,7 +1786,7 @@ def get_layer_aware_build_args(self) -> List[str]:

     def run_stage_image(self):
         executable_name_args = ['-o', self.config.final_image_name]
-        pgo_args = [f"--pgo={self.config.profile_path}"]
+        pgo_args = [f"--pgo={self.config.bm_suite.get_pgo_profile_for_image_build(self.config.profile_path)}"]
         if self.pgo_use_perf:
             # -g is already set in base_image_build_args if we're not using perf. When using perf, if debug symbols
             # are present they will interfere with sample decoding using source mappings.
@@ -1946,8 +1946,8 @@ def _prepare_for_running(self, args, out, err, cwd, nonZeroIsFatal):
         self.stages_context = StagesContext(self, out, err, nonZeroIsFatal, os.path.abspath(cwd if cwd else os.getcwd()))
         file_name = f"staged-benchmark.{self.ext}"
         output_dir = self.bmSuite.get_image_output_dir(
-            self.bmSuite.benchmark_output_dir(self.bmSuite.execution_context.benchmark, args),
-            self.bmSuite.get_full_image_name(self.bmSuite.get_base_image_name(), self.bmSuite.execution_context.virtual_machine.config_name())
+            self.bmSuite.benchmark_output_dir(bm_exec_context().get("benchmark"), args),
+            self.bmSuite.get_full_image_name(self.bmSuite.get_base_image_name(), bm_exec_context().get("vm").config_name())
         )
         self.staged_program_file_path = output_dir / file_name
         self.staged_program_file_path.parent.mkdir(parents=True, exist_ok=True)
@@ -3178,7 +3178,7 @@ def subgroup(self):
         return "graal-compiler"

     def benchmarkName(self):
-        return self.execution_context.benchmark
+        return bm_exec_context().get("benchmark")

     def benchmarkList(self, bmSuiteArgs):
         exclude = []
@@ -3226,8 +3226,9 @@ def validateEnvironment(self):
         self.baristaProjectConfigurationPath()
         self.baristaHarnessPath()

-    def new_execution_context(self, vm: Optional[Vm], benchmarks: List[str], bmSuiteArgs: List[str], fork_info: Optional[ForkInfo] = None) -> SingleBenchmarkExecutionContext:
-        return SingleBenchmarkExecutionContext(self, vm, benchmarks, bmSuiteArgs, fork_info)
+    def run(self, benchmarks, bmSuiteArgs) -> DataPoints:
+        with SingleBenchmarkManager(self):
+            return super().run(benchmarks, bmSuiteArgs)

     def createCommandLineArgs(self, benchmarks, bmSuiteArgs):
         # Pass the VM options, BaristaCommand will form the final command.
@@ -3490,7 +3491,7 @@ def produceHarnessCommand(self, cmd, suite):
         jvm_vm_options = jvm_cmd[index_of_java_exe + 1:]

         # Verify that the run arguments don't already contain a "--mode" option
-        run_args = suite.runArgs(suite.execution_context.bmSuiteArgs) + self._energyTrackerExtraOptions(suite)
+        run_args = suite.runArgs(bm_exec_context().get("bm_suite_args")) + self._energyTrackerExtraOptions(suite)
         mode_pattern = r"^(?:-m|--mode)(=.*)?$"
         mode_match = self._regexFindInCommand(run_args, mode_pattern)
         if mode_match:
@@ -4128,7 +4129,7 @@ def intercept_run(self, super_delegate: BenchmarkSuite, benchmarks, bm_suite_arg
         datapoints: List[DataPoint] = []

         vm = self.get_vm_registry().get_vm_from_suite_args(bm_suite_args)
-        with self.new_execution_context(vm, benchmarks, bm_suite_args):
+        with ConstantContextValueManager("vm", vm):
             effective_stages, complete_stage_list = vm.prepare_stages(self, bm_suite_args)
             self.stages_info = StagesInfo(effective_stages, complete_stage_list, vm)

@@ -4261,7 +4262,7 @@ def run(self, benchmarks, bm_suite_args: List[str]) -> DataPoints:
         fallback_reason = self.fallback_mode_reason(bm_suite_args)

         vm = self.get_vm_registry().get_vm_from_suite_args(bm_suite_args)
-        with self.new_execution_context(vm, benchmarks, bm_suite_args):
+        with ConstantContextValueManager("vm", vm):
             effective_stages, complete_stage_list = vm.prepare_stages(self, bm_suite_args)
             self.stages_info = StagesInfo(effective_stages, complete_stage_list, vm, bool(fallback_reason))

@@ -4502,6 +4503,13 @@ def get_image_output_dir(self, benchmark_output_dir: str, full_image_name: str)
         """
         return Path(benchmark_output_dir).absolute() / "native-image-benchmarks" / full_image_name

+    def get_pgo_profile_for_image_build(self, default_pgo_profile: str) -> str:
+        vm_args = self.vmArgs(bm_exec_context().get("bm_suite_args"))
+        parsed_arg = parse_prefixed_arg("-Dnative-image.benchmark.pgo=", vm_args, "Native Image benchmark PGO profiles should only be specified once!")
+        if not parsed_arg:
+            return default_pgo_profile
+        return parsed_arg
+

 def measureTimeToFirstResponse(bmSuite):
     protocolHost = bmSuite.serviceHost()
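
The hunks above drop the suite-local execution_context object in favour of values published through a shared benchmark execution context. The following is a minimal sketch of the resulting pattern, assuming only what the usage in this diff shows about bm_exec_context, SingleBenchmarkManager, and ConstantContextValueManager; the suite class and helper below are hypothetical stand-ins for the suites modified above.

import mx_benchmark
from mx_benchmark import bm_exec_context, SingleBenchmarkManager, ConstantContextValueManager


class ExampleImageBenchmarkSuite(mx_benchmark.BenchmarkSuite):  # hypothetical stand-in

    def run(self, benchmarks, bmSuiteArgs):
        # Scope the whole run as a single-benchmark run; judging by the reads
        # below, this makes "benchmark" and "bm_suite_args" available through
        # the shared execution context for the duration of the run.
        with SingleBenchmarkManager(self):
            return super().run(benchmarks, bmSuiteArgs)

    def benchmarkName(self):
        # Helpers read per-run values from the shared context instead of a
        # suite-local execution_context attribute.
        return bm_exec_context().get("benchmark")

    def prepare_vm_stages(self, bm_suite_args):
        # Hypothetical helper mirroring intercept_run()/run() above: resolve
        # the VM (assumes a suite that, like those above, exposes
        # get_vm_registry()) and pin it in the shared context while in use.
        vm = self.get_vm_registry().get_vm_from_suite_args(bm_suite_args)
        with ConstantContextValueManager("vm", vm):
            return bm_exec_context().get("vm").config_name()

The new get_pgo_profile_for_image_build hook in the last hunk follows the same pattern: it reads the suite arguments from the context, so a profile passed as -Dnative-image.benchmark.pgo=<path> overrides the default profile path when the image stage is built.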

sdk/mx.sdk/suite.py

Lines changed: 1 addition & 1 deletion
@@ -39,7 +39,7 @@
 # SOFTWARE.
 #
 suite = {
-    "mxversion": "7.58.6",
+    "mxversion": "7.67.0",
     "name" : "sdk",
     "version" : "25.1.0",
     "release" : False,

substratevm/mx.substratevm/mx_substratevm_benchmark.py

Lines changed: 13 additions & 15 deletions
@@ -36,6 +36,7 @@
 import mx
 import mx_benchmark
 import mx_sdk_benchmark
+from mx_benchmark import bm_exec_context, SingleBenchmarkManager
 from mx_sdk_benchmark import SUCCESSFUL_STAGE_PATTERNS, parse_prefixed_args
 from mx_util import StageName, Layer

@@ -291,12 +292,7 @@ def benchmarkList(self, bmSuiteArgs):

     def default_stages(self) -> List[str]:
         if self.benchmarkName() == "micronaut-pegasus":
-            if (
-                self.execution_context and
-                self.execution_context.virtual_machine and
-                self.execution_context.virtual_machine.config_name() and
-                self.execution_context.virtual_machine.config_name().endswith("-ce")
-            ):
+            if bm_exec_context().has("vm") and bm_exec_context().get("vm").config_name().endswith("-ce"):
                 # fails on CE due to --enable-sbom EE only option injected from upstream pom (GR-66891)
                 return []
         # The 'agent' stage is not supported, as currently we cannot run micronaut-pegasus on the JVM (GR-59793)
@@ -394,7 +390,8 @@ def build_assertions(self, benchmark: str, is_gate: bool) -> List[str]:
         return super().build_assertions(benchmark, is_gate)

     def run(self, benchmarks, bmSuiteArgs) -> mx_benchmark.DataPoints:
-        return self.intercept_run(super(), benchmarks, bmSuiteArgs)
+        with SingleBenchmarkManager(self):
+            return self.intercept_run(super(), benchmarks, bmSuiteArgs)

     def ensure_image_is_at_desired_location(self, bmSuiteArgs):
         if self.stages_info.current_stage.is_image() and self.application_fixed_image_name() is not None:
@@ -441,7 +438,7 @@ def _get_built_app_image(self, suite, stage):
         In the case of `instrument-run`, retrieves the image built during `instrument-image`.
         In the case of `run`, retrieves the image built during `image`.
         """
-        vm = suite.execution_context.virtual_machine
+        vm = bm_exec_context().get("vm")
         if stage.stage_name == StageName.INSTRUMENT_RUN:
             return vm.config.instrumented_image_path
         else:
@@ -470,15 +467,16 @@ def produceHarnessCommand(self, cmd, suite):
             raise TypeError(f"Expected an instance of {BaristaNativeImageBenchmarkSuite.__name__}, instead got an instance of {suite.__class__.__name__}")

         stage = suite.stages_info.current_stage
+        bm_suite_args = bm_exec_context().get("bm_suite_args")
         if stage.is_agent():
             # BaristaCommand works for agent stage, since it's a JVM stage
             cmd = self.produce_JVM_harness_command(cmd, suite)
             # Make agent run short
             cmd += self._short_load_testing_phases()
             # Add explicit agent stage args
             cmd += self._energyTrackerExtraOptions(suite)
-            cmd += parse_prefixed_args("-Dnative-image.benchmark.extra-jvm-arg=", suite.execution_context.bmSuiteArgs)
-            cmd += parse_prefixed_args("-Dnative-image.benchmark.extra-agent-run-arg=", suite.execution_context.bmSuiteArgs)
+            cmd += parse_prefixed_args("-Dnative-image.benchmark.extra-jvm-arg=", bm_suite_args)
+            cmd += parse_prefixed_args("-Dnative-image.benchmark.extra-agent-run-arg=", bm_suite_args)
             return cmd

         # Extract app image options and command prefix from the NativeImageVM command
@@ -499,18 +497,18 @@ def produceHarnessCommand(self, cmd, suite):
         ni_barista_cmd = [suite.baristaHarnessPath(), "--mode", "native", "--app-executable", app_image]
         if barista_workload is not None:
             ni_barista_cmd.append(f"--config={barista_workload}")
-        ni_barista_cmd += suite.runArgs(suite.execution_context.bmSuiteArgs) + self._energyTrackerExtraOptions(suite)
-        ni_barista_cmd += parse_prefixed_args("-Dnative-image.benchmark.extra-jvm-arg=", suite.execution_context.bmSuiteArgs)
+        ni_barista_cmd += suite.runArgs(bm_suite_args) + self._energyTrackerExtraOptions(suite)
+        ni_barista_cmd += parse_prefixed_args("-Dnative-image.benchmark.extra-jvm-arg=", bm_suite_args)
         if stage.is_instrument():
             # Make instrument run short
             ni_barista_cmd += self._short_load_testing_phases()
-            if suite.execution_context.benchmark == "play-scala-hello-world":
+            if bm_exec_context().get("benchmark") == "play-scala-hello-world":
                 self._updateCommandOption(ni_barista_cmd, "--vm-options", "-v", "-Dpidfile.path=/dev/null")
             # Add explicit instrument stage args
-            ni_barista_cmd += parse_prefixed_args("-Dnative-image.benchmark.extra-profile-run-arg=", suite.execution_context.bmSuiteArgs) or parse_prefixed_args("-Dnative-image.benchmark.extra-run-arg=", suite.execution_context.bmSuiteArgs)
+            ni_barista_cmd += parse_prefixed_args("-Dnative-image.benchmark.extra-profile-run-arg=", bm_suite_args) or parse_prefixed_args("-Dnative-image.benchmark.extra-run-arg=", bm_suite_args)
         else:
             # Add explicit run stage args
-            ni_barista_cmd += parse_prefixed_args("-Dnative-image.benchmark.extra-run-arg=", suite.execution_context.bmSuiteArgs)
+            ni_barista_cmd += parse_prefixed_args("-Dnative-image.benchmark.extra-run-arg=", bm_suite_args)
         if nivm_cmd_prefix:
             self._updateCommandOption(ni_barista_cmd, "--cmd-app-prefix", "-p", " ".join(nivm_cmd_prefix))
         if nivm_app_options:
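
The produceHarnessCommand hunks above now fetch the suite arguments once from the shared context and filter them with parse_prefixed_args. A minimal sketch of that pattern follows; the helper function itself is hypothetical, while the calls mirror the run-stage branch above.

from mx_benchmark import bm_exec_context
from mx_sdk_benchmark import parse_prefixed_args


def extra_run_args():
    # Hypothetical helper: fetch the benchmark suite arguments once from the
    # shared execution context rather than from suite.execution_context.
    bm_suite_args = bm_exec_context().get("bm_suite_args")
    # Collect any values passed as -Dnative-image.benchmark.extra-run-arg=<value>
    # on the mx command line, as the run stage above does.
    return parse_prefixed_args("-Dnative-image.benchmark.extra-run-arg=", bm_suite_args)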

substratevm/mx.substratevm/suite.py

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 # pylint: disable=line-too-long
 suite = {
-    "mxversion": "7.58.6",
+    "mxversion": "7.67.0",
     "name": "substratevm",
     "version" : "25.1.0",
     "release" : False,
