34 changes: 0 additions & 34 deletions .github/workflows/benchmarks.yaml
@@ -77,37 +77,3 @@ jobs:
push_options: '--force'
skip_checkout: true
file_pattern: 'benchmarks/bench.html benchmarks/bench.json'

benchmark_runner:
if: false
name: Benchmark Runner
runs-on: ubuntu-24.04

steps:

- uses: actions/checkout@v5
with:
persist-credentials: false

- name: Start Postgres
run: |
sudo systemctl start postgresql.service
sudo --login -u postgres psql -c "ALTER USER postgres WITH PASSWORD 'postgres';"
sudo --login -u postgres createdb fractal_test

- name: Install poetry
run: pipx install poetry==2.2.1

- name: Set up Python 3.12
uses: actions/setup-python@v5
with:
python-version: "3.12"
cache: "poetry"

- name: Install dependencies
run: poetry install --with dev --no-interaction

- name: Benchmark
run: |
cd benchmarks/runner/
poetry run python benchmark_runner.py
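
For reference, the deleted `benchmark_runner` job (already disabled via `if: false`) provisioned a local Postgres with user `postgres`, password `postgres`, and database `fractal_test`. A minimal connectivity check under those assumptions might look like the sketch below; the driver choice (`psycopg2`) is ours, the workflow pinned none:

```python
import psycopg2  # assumed driver, not pinned anywhere in this diff

# Credentials exactly as configured by the deleted "Start Postgres" step.
conn = psycopg2.connect(
    host="localhost",
    user="postgres",
    password="postgres",
    dbname="fractal_test",
)
with conn, conn.cursor() as cur:
    cur.execute("SELECT version();")
    print(cur.fetchone()[0])
conn.close()
```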
3 changes: 3 additions & 0 deletions CHANGELOG.md
@@ -20,6 +20,9 @@
* Major review of documentation, including making it up-to-date with v2.17.0 and relying more on autogenerated contents (\#2949, \#2983).
* Development:
* Add `shellcheck` to precommit, for `fractal-server/` files (\#2986).
* Testing:
* Update benchmarks (\#2990).
* Drop `benchmarks_runner` (\#2990).

# 2.17.1

34 changes: 30 additions & 4 deletions benchmarks/api_bench.py
@@ -120,10 +120,22 @@ def make_md_diff(self, agg_values_main: dict, agg_values_curr: dict):
)
template = env.get_template("bench_diff_template.md")

rendered_md = template.render(
zip=zip(agg_values_main.items(), agg_values_curr.items()),
exceptions=self.exceptions,
)
try:
rendered_md = template.render(
zip=zip(agg_values_main.items(), agg_values_curr.items()),
exceptions=self.exceptions,
)
except Exception as e:
print(f"ERROR in template.render. Original error {str(e)}")
rendered_md = (
"There was an exception in `template.render`, within "
"`make_md_diff`.\n"
f"Original error:\n{str(e)}\n"
"Main branch:\n"
f"```json\n{json.dumps(agg_values_main, indent=2)}\n```\n"
"Current branch:\n"
f"```json\n{json.dumps(agg_values_curr, indent=2)}\n```\n"
)

with open("bench_diff.md", "w") as output_file:
output_file.write(rendered_md)
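
The `zip=` keyword in this hunk exists because Jinja2 templates cannot reach Python builtins: the pairing of main-branch and current-branch rows has to be built in Python and passed into the template context (the real code passes it under the name `zip`). A minimal sketch with a hypothetical inline template, not the real `bench_diff_template.md`:

```python
from jinja2 import Environment

# Each item of `rows` is a ((key, main_value), (key, curr_value)) pair,
# mirroring zip(agg_values_main.items(), agg_values_curr.items()).
template = Environment().from_string(
    "{% for main_row, curr_row in rows %}"
    "{{ main_row[0] }}: {{ main_row[1] }} -> {{ curr_row[1] }}\n"
    "{% endfor %}"
)
print(template.render(rows=zip({"GET /a": 7.9}.items(), {"GET /a": 8.2}.items())))
# GET /a: 7.9 -> 8.2
```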
@@ -168,6 +180,8 @@ def _replace_path_params(self, headers: dict, path: str):
updated_path = re.sub(pattern, lambda x: next(id_list), path)
else:
updated_path = path

print(f"OLD PATH: {path}\nNEW PATH: {updated_path}")
return updated_path

def make_user_metrics(
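
For context, the `re.sub` call in this hunk fills `$...$` placeholders from an iterator of ids, one per match, left to right; the added print then shows both paths. A self-contained reconstruction (the actual `pattern` and the id lookup sit outside the hunk, so both are assumptions here):

```python
import re

path = "/api/v2/project/$project_id$/dataset/$dataset_id$/images/query/"
id_list = iter(["3", "420"])  # ids resolved earlier, e.g. from prior API calls
pattern = r"\$[a-z_]+\$"  # assumed placeholder syntax

# re.sub calls the lambda once per match, consuming the next id each time.
updated_path = re.sub(pattern, lambda match: next(id_list), path)
print(f"OLD PATH: {path}\nNEW PATH: {updated_path}")
# NEW PATH: /api/v2/project/3/dataset/420/images/query/
```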
@@ -226,6 +240,13 @@ def make_user_metrics(
)

def run_benchmark(self, n_requests: int) -> list:
"""

Note that the following endpoints are only tested for the
`[email protected]` user:
1. POST /api/v2/project/$project_id$/dataset/$dataset_id$/images/query/
2. GET /api/v2/project/$project_id$/dataset/
"""
# time and size are the two keys to extract and make the average
keys_to_sum = ["time", "size"]
user_metrics: list[dict] = []
@@ -238,6 +259,11 @@ def run_benchmark(self, n_requests: int) -> list:
and user.name != "[email protected]"
):
pass
elif (
endpoint["path"] == "/api/v2/project/$project_id$/dataset/"
and user.name != "[email protected]"
):
pass
else:
user_metrics.append(
self.make_user_metrics(
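
The `elif ... pass` branches above restrict two endpoints to a single user, as the new docstring documents; every other (endpoint, user) combination falls through to `make_user_metrics`. The same check as a standalone predicate, purely a sketch (`should_benchmark` and `restricted_user` are hypothetical names; the diff hard-codes a protected email literal):

```python
RESTRICTED_PATHS = {
    "/api/v2/project/$project_id$/dataset/$dataset_id$/images/query/",
    "/api/v2/project/$project_id$/dataset/",
}

def should_benchmark(path: str, user_name: str, restricted_user: str) -> bool:
    """Restricted endpoints run only for the designated user."""
    return not (path in RESTRICTED_PATHS and user_name != restricted_user)
```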
76 changes: 6 additions & 70 deletions benchmarks/bench.json
@@ -35,41 +35,12 @@
"size": 0.0
},
{
"path": "/api/v2/dataset/",
"verb": "GET",
"username": "vanilla",
"time": 7.9,
"size": 0.6
},
{
"path": "/api/v2/dataset/",
"verb": "GET",
"username": "power",
"time": 281.0,
"size": 389.0
},
{
"path": "/api/v2/dataset/",
"verb": "GET",
"username": "dataset",
"time": 108.1,
"size": 145.6
},
{
"path": "/api/v2/dataset/",
"verb": "GET",
"username": "project",
"time": 34.0,
"size": 50.9
},
{
"path": "/api/v2/dataset/",
"verb": "GET",
"username": "job",
"time": 20.0,
"size": 35.3
},
{
"path": "/api/v2/job/",
"verb": "GET",
"username": "vanilla",
@@ -174,41 +145,6 @@
"time": 4.6,
"size": 0.7
},
{
"path": "/api/v2/workflow/",
"verb": "GET",
"username": "vanilla",
"time": 10.0,
"size": 0.8
},
{
"path": "/api/v2/workflow/",
"verb": "GET",
"username": "power",
"time": 18.1,
"size": 27.2
},
{
"path": "/api/v2/workflow/",
"verb": "GET",
"username": "dataset",
"time": 14.2,
"size": 15.8
},
{
"path": "/api/v2/workflow/",
"verb": "GET",
"username": "project",
"time": 16.9,
"size": 19.9
},
{
"path": "/api/v2/workflow/",
"verb": "GET",
"username": "job",
"time": 9.2,
"size": 0.8
},
{
"path": "/api/v2/project/3/dataset/420/images/query/",
"verb": "POST",
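
Each retained entry records one user's `time` and `size` for a single endpoint (units are not stated in the file; milliseconds and kilobytes are a plausible reading given the magnitudes). A sketch of the per-endpoint aggregation that `keys_to_sum = ["time", "size"]` in `api_bench.py` suggests, assuming the file's top level is a flat list of such entries:

```python
import json
from collections import defaultdict

with open("benchmarks/bench.json") as f:
    entries = json.load(f)  # assumed: a flat list of entry dicts

keys_to_sum = ["time", "size"]
totals = defaultdict(lambda: {k: 0.0 for k in keys_to_sum} | {"n": 0})

for entry in entries:
    bucket = totals[(entry["verb"], entry["path"])]
    for key in keys_to_sum:
        bucket[key] += entry[key]
    bucket["n"] += 1

for (verb, path), agg in totals.items():
    means = ", ".join(f"mean {k}={agg[k] / agg['n']:.1f}" for k in keys_to_sum)
    print(f"{verb} {path}: {means}")
```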
1 change: 0 additions & 1 deletion benchmarks/runner/README.md

This file was deleted.

192 changes: 0 additions & 192 deletions benchmarks/runner/benchmark_runner.py

This file was deleted.
