Skip to content

Commit

Permalink
Reformat all Python files to line length 80, fix return type annotations
Browse files Browse the repository at this point in the history
Also ignores the `tools/compare.py` and `tools/gbench/report.py` files
for mypy, since they emit a barrage of errors that we can deal with
later. The errors are mostly related to dynamic classmethod definitions.
  • Loading branch information
nicholasjng committed Oct 27, 2023
1 parent 738cd7f commit 43d5fb7
Show file tree
Hide file tree
Showing 7 changed files with 155 additions and 41 deletions.
7 changes: 5 additions & 2 deletions .ycm_extra_conf.py
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,9 @@ def GetCompilationInfoForFile(filename):
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists(replacement_file):
compilation_info = database.GetCompilationInfoForFile(replacement_file)
compilation_info = database.GetCompilationInfoForFile(
replacement_file
)
if compilation_info.compiler_flags_:
return compilation_info
return None
Expand All @@ -108,7 +110,8 @@ def FlagsForFile(filename, **kwargs):
return None

final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_, compilation_info.compiler_working_dir_
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_,
)
else:
relative_to = DirectoryOfThisScript()
Expand Down
4 changes: 3 additions & 1 deletion bindings/python/google_benchmark/example.py
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,9 @@ def custom_counters(state):
# Set a counter as a rate.
state.counters["foo_rate"] = Counter(num_foo, Counter.kIsRate)
# Set a counter as an inverse of rate.
state.counters["foo_inv_rate"] = Counter(num_foo, Counter.kIsRate | Counter.kInvert)
state.counters["foo_inv_rate"] = Counter(
num_foo, Counter.kIsRate | Counter.kInvert
)
# Set a counter as a thread-average quantity.
state.counters["foo_avg"] = Counter(num_foo, Counter.kAvgThreads)
# There's also a combined flag:
Expand Down
9 changes: 6 additions & 3 deletions setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
import shutil
import sysconfig
from pathlib import Path
from typing import Generator

import setuptools
from setuptools.command import build_ext
Expand All @@ -15,7 +16,7 @@


@contextlib.contextmanager
def temp_fill_include_path(fp: str):
def temp_fill_include_path(fp: str) -> Generator[None, None, None]:
"""Temporarily set the Python include path in a file."""
with open(fp, "r+") as f:
try:
Expand Down Expand Up @@ -56,7 +57,7 @@ def run(self):
# explicitly call `bazel shutdown` for graceful exit
self.spawn(["bazel", "shutdown"])

def bazel_build(self, ext: BazelExtension):
def bazel_build(self, ext: BazelExtension) -> None:
"""Runs the bazel build to create the package."""
with temp_fill_include_path("WORKSPACE"):
temp_path = Path(self.build_temp)
Expand Down Expand Up @@ -94,7 +95,9 @@ def bazel_build(self, ext: BazelExtension):

shared_lib_suffix = ".dll" if IS_WINDOWS else ".so"
ext_name = ext.target_name + shared_lib_suffix
ext_bazel_bin_path = temp_path / "bazel-bin" / ext.relpath / ext_name
ext_bazel_bin_path = (
temp_path / "bazel-bin" / ext.relpath / ext_name
)

ext_dest_path = Path(self.get_ext_fullpath(ext.name))
shutil.copyfile(ext_bazel_bin_path, ext_dest_path)
Expand Down
27 changes: 22 additions & 5 deletions tools/compare.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,7 @@
#!/usr/bin/env python3

# type: ignore

"""
compare.py - versatile benchmark output compare tool
"""
Expand Down Expand Up @@ -39,7 +42,8 @@ def check_inputs(in1, in2, flags):
# --benchmark_filter=
for flag in util.remove_benchmark_flags("--benchmark_filter=", flags):
print(
"WARNING: passing %s has no effect since both " "inputs are JSON" % flag
"WARNING: passing %s has no effect since both "
"inputs are JSON" % flag
)
if output_type is not None and output_type != "json":
print(
Expand All @@ -53,7 +57,9 @@ def check_inputs(in1, in2, flags):


def create_parser():
parser = ArgumentParser(description="versatile benchmark output compare tool")
parser = ArgumentParser(
description="versatile benchmark output compare tool"
)

parser.add_argument(
"-a",
Expand Down Expand Up @@ -299,7 +305,9 @@ def main():
# Now, filter the benchmarks so that the difference report can work
if filter_baseline and filter_contender:
replacement = "[%s vs. %s]" % (filter_baseline, filter_contender)
json1 = gbench.report.filter_benchmark(json1_orig, filter_baseline, replacement)
json1 = gbench.report.filter_benchmark(
json1_orig, filter_baseline, replacement
)
json2 = gbench.report.filter_benchmark(
json2_orig, filter_contender, replacement
)
Expand Down Expand Up @@ -428,7 +436,9 @@ def test_filters_basic(self):
self.assertFalse(parsed.benchmark_options)

def test_filters_with_remainder(self):
parsed = self.parser.parse_args(["filters", self.testInput0, "c", "d", "e"])
parsed = self.parser.parse_args(
["filters", self.testInput0, "c", "d", "e"]
)
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, "filters")
Expand Down Expand Up @@ -464,7 +474,14 @@ def test_benchmarksfiltered_basic(self):

def test_benchmarksfiltered_with_remainder(self):
parsed = self.parser.parse_args(
["benchmarksfiltered", self.testInput0, "c", self.testInput1, "e", "f"]
[
"benchmarksfiltered",
self.testInput0,
"c",
self.testInput1,
"e",
"f",
]
)
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
Expand Down
107 changes: 91 additions & 16 deletions tools/gbench/report.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,7 @@
"""report.py - Utilities for reporting statistics about benchmark results
# type: ignore

"""
report.py - Utilities for reporting statistics about benchmark results
"""

import copy
Expand Down Expand Up @@ -58,7 +61,10 @@ def color_format(use_color, fmt_str, *args, **kwargs):
"""
assert use_color is True or use_color is False
if not use_color:
args = [arg if not isinstance(arg, BenchmarkColor) else BC_NONE for arg in args]
args = [
arg if not isinstance(arg, BenchmarkColor) else BC_NONE
for arg in args
]
kwargs = {
key: arg if not isinstance(arg, BenchmarkColor) else BC_NONE
for key, arg in kwargs.items()
Expand Down Expand Up @@ -293,8 +299,12 @@ def get_difference_report(json1, json2, utest=False):
"cpu_time": bn["cpu_time"],
"real_time_other": other_bench["real_time"],
"cpu_time_other": other_bench["cpu_time"],
"time": calculate_change(bn["real_time"], other_bench["real_time"]),
"cpu": calculate_change(bn["cpu_time"], other_bench["cpu_time"]),
"time": calculate_change(
bn["real_time"], other_bench["real_time"]
),
"cpu": calculate_change(
bn["cpu_time"], other_bench["cpu_time"]
),
}
)

Expand All @@ -320,11 +330,14 @@ def get_difference_report(json1, json2, utest=False):
# benchmark suite.
if measurements:
run_type = (
partition[0][0]["run_type"] if "run_type" in partition[0][0] else ""
partition[0][0]["run_type"]
if "run_type" in partition[0][0]
else ""
)
aggregate_name = (
partition[0][0]["aggregate_name"]
if run_type == "aggregate" and "aggregate_name" in partition[0][0]
if run_type == "aggregate"
and "aggregate_name" in partition[0][0]
else ""
)
diff_report.append(
Expand Down Expand Up @@ -447,7 +460,9 @@ class TestGetUniqueBenchmarkNames(unittest.TestCase):
def load_results(self):
import json

testInputs = os.path.join(os.path.dirname(os.path.realpath(__file__)), "Inputs")
testInputs = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "Inputs"
)
testOutput = os.path.join(testInputs, "test3_run0.json")
with open(testOutput, "r") as f:
json = json.load(f)
Expand Down Expand Up @@ -494,13 +509,69 @@ def test_json_diff_report_pretty_printing(self):
["BM_SameTimes", "+0.0000", "+0.0000", "10", "10", "10", "10"],
["BM_2xFaster", "-0.5000", "-0.5000", "50", "25", "50", "25"],
["BM_2xSlower", "+1.0000", "+1.0000", "50", "100", "50", "100"],
["BM_1PercentFaster", "-0.0100", "-0.0100", "100", "99", "100", "99"],
["BM_1PercentSlower", "+0.0100", "+0.0100", "100", "101", "100", "101"],
["BM_10PercentFaster", "-0.1000", "-0.1000", "100", "90", "100", "90"],
["BM_10PercentSlower", "+0.1000", "+0.1000", "100", "110", "100", "110"],
["BM_100xSlower", "+99.0000", "+99.0000", "100", "10000", "100", "10000"],
["BM_100xFaster", "-0.9900", "-0.9900", "10000", "100", "10000", "100"],
["BM_10PercentCPUToTime", "+0.1000", "-0.1000", "100", "110", "100", "90"],
[
"BM_1PercentFaster",
"-0.0100",
"-0.0100",
"100",
"99",
"100",
"99",
],
[
"BM_1PercentSlower",
"+0.0100",
"+0.0100",
"100",
"101",
"100",
"101",
],
[
"BM_10PercentFaster",
"-0.1000",
"-0.1000",
"100",
"90",
"100",
"90",
],
[
"BM_10PercentSlower",
"+0.1000",
"+0.1000",
"100",
"110",
"100",
"110",
],
[
"BM_100xSlower",
"+99.0000",
"+99.0000",
"100",
"10000",
"100",
"10000",
],
[
"BM_100xFaster",
"-0.9900",
"-0.9900",
"10000",
"100",
"10000",
"100",
],
[
"BM_10PercentCPUToTime",
"+0.1000",
"-0.1000",
"100",
"110",
"100",
"90",
],
["BM_ThirdFaster", "-0.3333", "-0.3334", "100", "67", "100", "67"],
["BM_NotBadTimeUnit", "-0.9000", "+0.2000", "0", "0", "0", "1"],
["BM_hasLabel", "+0.0000", "+0.0000", "1", "1", "1", "1"],
Expand Down Expand Up @@ -1126,7 +1197,9 @@ def test_json_diff_report(self):
assert_measurements(self, out, expected)


class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(unittest.TestCase):
class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(
unittest.TestCase
):
@classmethod
def setUpClass(cls):
def load_results():
Expand Down Expand Up @@ -1409,7 +1482,9 @@ def test_json_diff_report_pretty_printing(self):

for n in range(len(self.json["benchmarks"]) ** 2):
random.shuffle(self.json["benchmarks"])
sorted_benchmarks = util.sort_benchmark_results(self.json)["benchmarks"]
sorted_benchmarks = util.sort_benchmark_results(self.json)[
"benchmarks"
]
self.assertEqual(len(expected_names), len(sorted_benchmarks))
for out, expected in zip(sorted_benchmarks, expected_names):
self.assertEqual(out["name"], expected)
Expand Down
11 changes: 8 additions & 3 deletions tools/gbench/util.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,8 @@ def classify_input_file(filename):
ftype = IT_JSON
else:
err_msg = (
"'%s' does not name a valid benchmark executable or JSON file" % filename
"'%s' does not name a valid benchmark executable or JSON file"
% filename
)
return ftype, err_msg

Expand Down Expand Up @@ -189,7 +190,9 @@ def run_benchmark(exe_name, benchmark_flags):
is_temp_output = True
thandle, output_name = tempfile.mkstemp()
os.close(thandle)
benchmark_flags = list(benchmark_flags) + ["--benchmark_out=%s" % output_name]
benchmark_flags = list(benchmark_flags) + [
"--benchmark_out=%s" % output_name
]

cmd = [exe_name] + benchmark_flags
print("RUNNING: %s" % " ".join(cmd))
Expand All @@ -212,7 +215,9 @@ def run_or_load_benchmark(filename, benchmark_flags):
"""
ftype = check_input_file(filename)
if ftype == IT_JSON:
benchmark_filter = find_benchmark_flag("--benchmark_filter=", benchmark_flags)
benchmark_filter = find_benchmark_flag(
"--benchmark_filter=", benchmark_flags
)
return load_benchmark_results(filename, benchmark_filter)
if ftype == IT_Executable:
return run_benchmark(filename, benchmark_flags)
Expand Down
Loading

0 comments on commit 43d5fb7

Please sign in to comment.