
Commit 43d5fb7

Reformat all Python files to line length 80, fix return type annotations
Also ignores the `tools/compare.py` and `tools/gbench/report.py` files for mypy, since they emit a barrage of errors which we can deal with later. The errors are mostly related to dynamic classmethod definition.
Parent: 738cd7f
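
Note on the mypy exclusions: as the `tools/compare.py` and `tools/gbench/report.py` hunks below show, each file is opted out with a module-level `# type: ignore` comment rather than per-line suppressions. mypy only treats that comment as file-wide when it appears before the module docstring, imports, and any other code, which is why it sits between the shebang and the docstring in `compare.py` and above the docstring in `report.py`. A minimal sketch of the pattern (the helper is illustrative, not from the diff):

    #!/usr/bin/env python3

    # type: ignore

    """A module whose type errors are deferred for now."""

    def helper() -> int:
        return "not an int"  # would normally be a mypy error; ignored file-wide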

7 files changed: +155 -41 lines

.ycm_extra_conf.py

Lines changed: 5 additions & 2 deletions
@@ -92,7 +92,9 @@ def GetCompilationInfoForFile(filename):
         for extension in SOURCE_EXTENSIONS:
             replacement_file = basename + extension
             if os.path.exists(replacement_file):
-                compilation_info = database.GetCompilationInfoForFile(replacement_file)
+                compilation_info = database.GetCompilationInfoForFile(
+                    replacement_file
+                )
                 if compilation_info.compiler_flags_:
                     return compilation_info
         return None
@@ -108,7 +110,8 @@ def FlagsForFile(filename, **kwargs):
             return None

         final_flags = MakeRelativePathsInFlagsAbsolute(
-            compilation_info.compiler_flags_, compilation_info.compiler_working_dir_
+            compilation_info.compiler_flags_,
+            compilation_info.compiler_working_dir_,
         )
     else:
         relative_to = DirectoryOfThisScript()

bindings/python/google_benchmark/example.py

Lines changed: 3 additions & 1 deletion
@@ -86,7 +86,9 @@ def custom_counters(state):
     # Set a counter as a rate.
     state.counters["foo_rate"] = Counter(num_foo, Counter.kIsRate)
     # Set a counter as an inverse of rate.
-    state.counters["foo_inv_rate"] = Counter(num_foo, Counter.kIsRate | Counter.kInvert)
+    state.counters["foo_inv_rate"] = Counter(
+        num_foo, Counter.kIsRate | Counter.kInvert
+    )
     # Set a counter as a thread-average quantity.
     state.counters["foo_avg"] = Counter(num_foo, Counter.kAvgThreads)
     # There's also a combined flag:
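
For readers new to the bindings, here is a self-contained sketch of how the counter flags touched above are used end to end. The `benchmark.register` decorator and `benchmark.main()` entry point are assumed from the surrounding example module and are not part of this hunk:

    import google_benchmark as benchmark
    from google_benchmark import Counter


    @benchmark.register
    def custom_counters(state):
        num_foo = 0.0
        while state:
            num_foo += 0.13  # pretend each iteration processes some "foo"
        # Reported as foo per second.
        state.counters["foo_rate"] = Counter(num_foo, Counter.kIsRate)
        # Reported as seconds per foo (a rate, then inverted).
        state.counters["foo_inv_rate"] = Counter(
            num_foo, Counter.kIsRate | Counter.kInvert
        )
        # Reported as foo per thread.
        state.counters["foo_avg"] = Counter(num_foo, Counter.kAvgThreads)


    if __name__ == "__main__":
        benchmark.main()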

setup.py

Lines changed: 6 additions & 3 deletions
@@ -4,6 +4,7 @@
 import shutil
 import sysconfig
 from pathlib import Path
+from typing import Generator

 import setuptools
 from setuptools.command import build_ext
@@ -15,7 +16,7 @@


 @contextlib.contextmanager
-def temp_fill_include_path(fp: str):
+def temp_fill_include_path(fp: str) -> Generator[None, None, None]:
     """Temporarily set the Python include path in a file."""
     with open(fp, "r+") as f:
         try:
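
Context for the annotation added above: a function wrapped in `@contextlib.contextmanager` is written as a generator that yields exactly once, so its return type is spelled `Generator[yield_type, send_type, return_type]`; `temp_fill_include_path` yields nothing, is never sent a value, and returns nothing, hence `Generator[None, None, None]`. A minimal sketch with a hypothetical helper, independent of `setup.py`:

    import contextlib
    from typing import Generator


    @contextlib.contextmanager
    def patched_file(note: str) -> Generator[None, None, None]:
        print("set-up work goes here:", note)
        try:
            yield  # the body of the with-statement runs here
        finally:
            print("tear-down work goes here:", note)

The shorter `Iterator[None]` would also satisfy the checker, since `contextmanager` only requires an iterator; the commit uses the fuller `Generator` spelling.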
@@ -56,7 +57,7 @@ def run(self):
         # explicitly call `bazel shutdown` for graceful exit
         self.spawn(["bazel", "shutdown"])

-    def bazel_build(self, ext: BazelExtension):
+    def bazel_build(self, ext: BazelExtension) -> None:
         """Runs the bazel build to create the package."""
         with temp_fill_include_path("WORKSPACE"):
             temp_path = Path(self.build_temp)
@@ -94,7 +95,9 @@ def bazel_build(self, ext: BazelExtension):

             shared_lib_suffix = ".dll" if IS_WINDOWS else ".so"
             ext_name = ext.target_name + shared_lib_suffix
-            ext_bazel_bin_path = temp_path / "bazel-bin" / ext.relpath / ext_name
+            ext_bazel_bin_path = (
+                temp_path / "bazel-bin" / ext.relpath / ext_name
+            )

             ext_dest_path = Path(self.get_ext_fullpath(ext.name))
             shutil.copyfile(ext_bazel_bin_path, ext_dest_path)

tools/compare.py

Lines changed: 22 additions & 5 deletions
@@ -1,4 +1,7 @@
 #!/usr/bin/env python3
+
+# type: ignore
+
 """
 compare.py - versatile benchmark output compare tool
 """
@@ -39,7 +42,8 @@ def check_inputs(in1, in2, flags):
     # --benchmark_filter=
     for flag in util.remove_benchmark_flags("--benchmark_filter=", flags):
         print(
-            "WARNING: passing %s has no effect since both " "inputs are JSON" % flag
+            "WARNING: passing %s has no effect since both "
+            "inputs are JSON" % flag
         )
     if output_type is not None and output_type != "json":
         print(
@@ -53,7 +57,9 @@ def check_inputs(in1, in2, flags):


 def create_parser():
-    parser = ArgumentParser(description="versatile benchmark output compare tool")
+    parser = ArgumentParser(
+        description="versatile benchmark output compare tool"
+    )

     parser.add_argument(
         "-a",
@@ -299,7 +305,9 @@ def main():
     # Now, filter the benchmarks so that the difference report can work
     if filter_baseline and filter_contender:
         replacement = "[%s vs. %s]" % (filter_baseline, filter_contender)
-        json1 = gbench.report.filter_benchmark(json1_orig, filter_baseline, replacement)
+        json1 = gbench.report.filter_benchmark(
+            json1_orig, filter_baseline, replacement
+        )
         json2 = gbench.report.filter_benchmark(
             json2_orig, filter_contender, replacement
         )
@@ -428,7 +436,9 @@ def test_filters_basic(self):
         self.assertFalse(parsed.benchmark_options)

     def test_filters_with_remainder(self):
-        parsed = self.parser.parse_args(["filters", self.testInput0, "c", "d", "e"])
+        parsed = self.parser.parse_args(
+            ["filters", self.testInput0, "c", "d", "e"]
+        )
         self.assertFalse(parsed.display_aggregates_only)
         self.assertTrue(parsed.utest)
         self.assertEqual(parsed.mode, "filters")
@@ -464,7 +474,14 @@ def test_benchmarksfiltered_basic(self):

     def test_benchmarksfiltered_with_remainder(self):
         parsed = self.parser.parse_args(
-            ["benchmarksfiltered", self.testInput0, "c", self.testInput1, "e", "f"]
+            [
+                "benchmarksfiltered",
+                self.testInput0,
+                "c",
+                self.testInput1,
+                "e",
+                "f",
+            ]
         )
         self.assertFalse(parsed.display_aggregates_only)
         self.assertTrue(parsed.utest)

tools/gbench/report.py

Lines changed: 91 additions & 16 deletions
@@ -1,4 +1,7 @@
-"""report.py - Utilities for reporting statistics about benchmark results
+# type: ignore
+
+"""
+report.py - Utilities for reporting statistics about benchmark results
 """

 import copy
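
The commit message attributes most of the silenced errors in these tools files to dynamically defined classmethods. A hypothetical sketch (not taken from `report.py`) of the kind of pattern that trips the checker: a classmethod attached to a class object at runtime is invisible to mypy, so the assignment and every later call are flagged.

    class Results:
        pass


    def _load(cls, path):
        return {"source": path}


    # The attribute only exists at runtime, so mypy cannot see it and reports
    # both the assignment and the call below as errors.
    Results.load = classmethod(_load)
    Results.load("benchmark.json")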
@@ -58,7 +61,10 @@ def color_format(use_color, fmt_str, *args, **kwargs):
     """
     assert use_color is True or use_color is False
     if not use_color:
-        args = [arg if not isinstance(arg, BenchmarkColor) else BC_NONE for arg in args]
+        args = [
+            arg if not isinstance(arg, BenchmarkColor) else BC_NONE
+            for arg in args
+        ]
         kwargs = {
             key: arg if not isinstance(arg, BenchmarkColor) else BC_NONE
             for key, arg in kwargs.items()
@@ -293,8 +299,12 @@ def get_difference_report(json1, json2, utest=False):
                     "cpu_time": bn["cpu_time"],
                     "real_time_other": other_bench["real_time"],
                     "cpu_time_other": other_bench["cpu_time"],
-                    "time": calculate_change(bn["real_time"], other_bench["real_time"]),
-                    "cpu": calculate_change(bn["cpu_time"], other_bench["cpu_time"]),
+                    "time": calculate_change(
+                        bn["real_time"], other_bench["real_time"]
+                    ),
+                    "cpu": calculate_change(
+                        bn["cpu_time"], other_bench["cpu_time"]
+                    ),
                 }
             )

@@ -320,11 +330,14 @@ def get_difference_report(json1, json2, utest=False):
         # benchmark suite.
         if measurements:
             run_type = (
-                partition[0][0]["run_type"] if "run_type" in partition[0][0] else ""
+                partition[0][0]["run_type"]
+                if "run_type" in partition[0][0]
+                else ""
             )
             aggregate_name = (
                 partition[0][0]["aggregate_name"]
-                if run_type == "aggregate" and "aggregate_name" in partition[0][0]
+                if run_type == "aggregate"
+                and "aggregate_name" in partition[0][0]
                 else ""
             )
             diff_report.append(
@@ -447,7 +460,9 @@ class TestGetUniqueBenchmarkNames(unittest.TestCase):
     def load_results(self):
         import json

-        testInputs = os.path.join(os.path.dirname(os.path.realpath(__file__)), "Inputs")
+        testInputs = os.path.join(
+            os.path.dirname(os.path.realpath(__file__)), "Inputs"
+        )
         testOutput = os.path.join(testInputs, "test3_run0.json")
         with open(testOutput, "r") as f:
             json = json.load(f)
@@ -494,13 +509,69 @@ def test_json_diff_report_pretty_printing(self):
             ["BM_SameTimes", "+0.0000", "+0.0000", "10", "10", "10", "10"],
             ["BM_2xFaster", "-0.5000", "-0.5000", "50", "25", "50", "25"],
             ["BM_2xSlower", "+1.0000", "+1.0000", "50", "100", "50", "100"],
-            ["BM_1PercentFaster", "-0.0100", "-0.0100", "100", "99", "100", "99"],
-            ["BM_1PercentSlower", "+0.0100", "+0.0100", "100", "101", "100", "101"],
-            ["BM_10PercentFaster", "-0.1000", "-0.1000", "100", "90", "100", "90"],
-            ["BM_10PercentSlower", "+0.1000", "+0.1000", "100", "110", "100", "110"],
-            ["BM_100xSlower", "+99.0000", "+99.0000", "100", "10000", "100", "10000"],
-            ["BM_100xFaster", "-0.9900", "-0.9900", "10000", "100", "10000", "100"],
-            ["BM_10PercentCPUToTime", "+0.1000", "-0.1000", "100", "110", "100", "90"],
+            [
+                "BM_1PercentFaster",
+                "-0.0100",
+                "-0.0100",
+                "100",
+                "99",
+                "100",
+                "99",
+            ],
+            [
+                "BM_1PercentSlower",
+                "+0.0100",
+                "+0.0100",
+                "100",
+                "101",
+                "100",
+                "101",
+            ],
+            [
+                "BM_10PercentFaster",
+                "-0.1000",
+                "-0.1000",
+                "100",
+                "90",
+                "100",
+                "90",
+            ],
+            [
+                "BM_10PercentSlower",
+                "+0.1000",
+                "+0.1000",
+                "100",
+                "110",
+                "100",
+                "110",
+            ],
+            [
+                "BM_100xSlower",
+                "+99.0000",
+                "+99.0000",
+                "100",
+                "10000",
+                "100",
+                "10000",
+            ],
+            [
+                "BM_100xFaster",
+                "-0.9900",
+                "-0.9900",
+                "10000",
+                "100",
+                "10000",
+                "100",
+            ],
+            [
+                "BM_10PercentCPUToTime",
+                "+0.1000",
+                "-0.1000",
+                "100",
+                "110",
+                "100",
+                "90",
+            ],
             ["BM_ThirdFaster", "-0.3333", "-0.3334", "100", "67", "100", "67"],
             ["BM_NotBadTimeUnit", "-0.9000", "+0.2000", "0", "0", "0", "1"],
             ["BM_hasLabel", "+0.0000", "+0.0000", "1", "1", "1", "1"],
@@ -1126,7 +1197,9 @@ def test_json_diff_report(self):
         assert_measurements(self, out, expected)


-class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(unittest.TestCase):
+class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(
+    unittest.TestCase
+):
     @classmethod
     def setUpClass(cls):
         def load_results():
@@ -1409,7 +1482,9 @@ def test_json_diff_report_pretty_printing(self):

         for n in range(len(self.json["benchmarks"]) ** 2):
             random.shuffle(self.json["benchmarks"])
-            sorted_benchmarks = util.sort_benchmark_results(self.json)["benchmarks"]
+            sorted_benchmarks = util.sort_benchmark_results(self.json)[
+                "benchmarks"
+            ]
             self.assertEqual(len(expected_names), len(sorted_benchmarks))
             for out, expected in zip(sorted_benchmarks, expected_names):
                 self.assertEqual(out["name"], expected)

tools/gbench/util.py

Lines changed: 8 additions & 3 deletions
@@ -72,7 +72,8 @@ def classify_input_file(filename):
         ftype = IT_JSON
     else:
         err_msg = (
-            "'%s' does not name a valid benchmark executable or JSON file" % filename
+            "'%s' does not name a valid benchmark executable or JSON file"
+            % filename
         )
     return ftype, err_msg

@@ -189,7 +190,9 @@ def run_benchmark(exe_name, benchmark_flags):
         is_temp_output = True
         thandle, output_name = tempfile.mkstemp()
         os.close(thandle)
-        benchmark_flags = list(benchmark_flags) + ["--benchmark_out=%s" % output_name]
+        benchmark_flags = list(benchmark_flags) + [
+            "--benchmark_out=%s" % output_name
+        ]

     cmd = [exe_name] + benchmark_flags
     print("RUNNING: %s" % " ".join(cmd))
@@ -212,7 +215,9 @@ def run_or_load_benchmark(filename, benchmark_flags):
     """
     ftype = check_input_file(filename)
     if ftype == IT_JSON:
-        benchmark_filter = find_benchmark_flag("--benchmark_filter=", benchmark_flags)
+        benchmark_filter = find_benchmark_flag(
+            "--benchmark_filter=", benchmark_flags
+        )
         return load_benchmark_results(filename, benchmark_filter)
     if ftype == IT_Executable:
         return run_benchmark(filename, benchmark_flags)
