diff --git a/.gitlab/benchmarks/microbenchmarks.yml b/.gitlab/benchmarks/microbenchmarks.yml
index 7e2f105bf65..1e50ef52ada 100644
--- a/.gitlab/benchmarks/microbenchmarks.yml
+++ b/.gitlab/benchmarks/microbenchmarks.yml
@@ -31,7 +31,7 @@ variables:
       capture-hardware-software-info.sh
-      if [[ $SCENARIO =~ ^flask_* || $SCENARIO =~ ^django_* ]];
+      if [[ $SCENARIO =~ ^flask_* || $SCENARIO =~ ^django_* || $SCENARIO =~ ^startup* ]];
       then
        BP_SCENARIO=$SCENARIO bp-runner "${CI_PROJECT_DIR:-.}/.gitlab/benchmarks/bp-runner.yml" --debug -t
       else
@@ -153,6 +153,7 @@ microbenchmarks:
       - "core_api"
       - "otel_span"
       - "otel_sdk_span"
+      - "startup"
       - "appsec_iast_aspects"
       - "appsec_iast_aspects_ospath"
       - "appsec_iast_aspects_re_module"
@@ -174,9 +175,7 @@ microbenchmarks:
       - "packages_update_imported_dependencies"
       - "recursive_computation"
       - "telemetry_add_metric"
-      # They take a long time to run, and now need the agent running
-      # TODO: Make benchmarks faster, or run less frequently, or as macrobenchmarks
-      # - "startup"
+
 
 benchmarks-pr-comment:
   image: $MICROBENCHMARKS_CI_IMAGE
diff --git a/benchmarks/bm/_scenario.py b/benchmarks/bm/_scenario.py
index 155a9752beb..69bc476b728 100644
--- a/benchmarks/bm/_scenario.py
+++ b/benchmarks/bm/_scenario.py
@@ -25,7 +25,7 @@ def add_cmdline_args(cmd, args):
         cmd = runner.argparser
 
         for _field in dataclasses.fields(scenario_cls):
-            if _field.name == "cprofile_loops":
+            if _field.name in ("cprofile_loops", "inner_loops"):
                 continue
             cmd.add_argument("--{}".format(_field.name), type=_field.type if _field.type is not bool else str_to_bool)
 
@@ -50,7 +50,7 @@ def add_cmdline_args(cmd, args):
         finally:
             pr.dump_stats(pstats_output)
 
-        runner.bench_time_func(scenario.scenario_name, scenario._pyperf)
+        runner.bench_time_func(scenario.scenario_name, scenario._pyperf, inner_loops=scenario._inner_loops)
 
 
 @dataclasses.dataclass
@@ -81,6 +81,18 @@ def _cprofile_loops(self) -> int:
         """
         return getattr(self, "cprofile_loops", 200)
 
+    @property
+    def _inner_loops(self) -> typing.Optional[int]:
+        """Returns the number of inner loops to run for each pyperf iteration.
+
+        This is useful for scenarios that have a very long execution time per operation.
+
+        This can be set in the scenario class as a class variable, "inner_loops", or defaults to None.
+
+        If None, pyperf will determine the number of inner loops automatically.
+        """
+        return getattr(self, "inner_loops", None)
+
     @abc.abstractmethod
     def run(self) -> typing.Generator[typing.Callable[[int], None], None, None]:
         """Returns a context manager that yields a function to be run for performance testing."""
diff --git a/benchmarks/startup/scenario.py b/benchmarks/startup/scenario.py
index f7946590100..8ab19144628 100644
--- a/benchmarks/startup/scenario.py
+++ b/benchmarks/startup/scenario.py
@@ -17,6 +17,9 @@ class Startup(bm.Scenario):
     # Not helpful for subprocess benchmarks
     cprofile_loops: int = 0
 
+    # This benchmark takes a long time to run, reduce the inner loops
+    inner_loops: int = 10
+
     def run(self):
         env = os.environ.copy()
         if self.env:
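
For context, a minimal sketch (not part of this patch) of how pyperf consumes an inner_loops value like the one the Startup scenario now sets: the timing function returns the total elapsed time for the outer loop count pyperf passes in, and pyperf divides that raw value by loops * inner_loops to report a per-operation result. The benchmark name, the INNER_LOOPS constant, and the loop body below are illustrative assumptions, not code from this repository.

import time

import pyperf

# Illustrative only: number of operations each outer pyperf loop performs.
INNER_LOOPS = 10


def time_func(loops):
    # pyperf supplies the outer loop count; we time loops * INNER_LOOPS operations
    # and return the total elapsed seconds. Because inner_loops is passed to
    # bench_time_func below, pyperf divides the raw value by loops * INNER_LOOPS,
    # so the reported timing is per single operation.
    t0 = time.perf_counter()
    for _ in range(loops):
        for _ in range(INNER_LOOPS):
            sum(range(100))  # stand-in for the operation under test
    return time.perf_counter() - t0


if __name__ == "__main__":
    runner = pyperf.Runner()
    runner.bench_time_func("inner_loops_example", time_func, inner_loops=INNER_LOOPS)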