Skip to content

Commit 37eefd3

Browse files
committed
Minor optimizations by using comprehensions
1 parent 8f3b470 commit 37eefd3

File tree

6 files changed

+13
-29
lines changed

6 files changed

+13
-29
lines changed

pyperf/__main__.py

+1-5
Original file line numberDiff line numberDiff line change
@@ -349,11 +349,7 @@ def group_by_name(self):
349349
def group_by_name_ignored(self):
350350
names = set(self._group_by_name_names())
351351
for suite in self.suites:
352-
ignored = []
353-
for bench in suite:
354-
if bench.get_name() not in names:
355-
ignored.append(bench)
356-
if ignored:
352+
if ignored := [bench for bench in suite if bench.get_name() not in names]:
357353
yield (suite, ignored)
358354

359355

pyperf/_bench.py

+1-4
Original file line numberDiff line numberDiff line change
@@ -557,10 +557,7 @@ def _filter_runs(self, include, only_runs):
557557
if include:
558558
old_runs = self._runs
559559
max_index = len(old_runs) - 1
560-
runs = []
561-
for index in only_runs:
562-
if index <= max_index:
563-
runs.append(old_runs[index])
560+
runs = [old_runs[index] for index in only_runs if index <= max_index]
564561
else:
565562
runs = self._runs
566563
max_index = len(runs) - 1

pyperf/_compare.py

+6-11
Original file line numberDiff line numberDiff line change
@@ -163,10 +163,7 @@ def __init__(self, headers, rows):
163163
self.widths[column] = max(self.widths[column], len(cell))
164164

165165
def _render_line(self, char='-'):
166-
parts = ['']
167-
for width in self.widths:
168-
parts.append(char * (width + 2))
169-
parts.append('')
166+
parts = [''] + [char * (width + 2) for width in self.widths] + ['']
170167
return '+'.join(parts)
171168

172169
def _render_row(self, row):
@@ -250,7 +247,7 @@ def __init__(self, benchmarks, args):
250247
for results in self.all_results:
251248
for result in results:
252249
self.tags.update(get_tags_for_result(result))
253-
self.tags = sorted(list(self.tags))
250+
self.tags = sorted(self.tags)
254251

255252
def compare_benchmarks(self, name, benchmarks):
256253
min_speed = self.min_speed
@@ -280,9 +277,9 @@ def sort_key(results):
280277

281278
self.all_results.sort(key=sort_key)
282279

283-
headers = ['Benchmark', self.all_results[0][0].ref.name]
284-
for item in self.all_results[0]:
285-
headers.append(item.changed.name)
280+
headers = ['Benchmark', self.all_results[0][0].ref.name] + [
281+
item.changed.name for item in self.all_results[0]
282+
]
286283

287284
all_norm_means = [[] for _ in range(len(headers[2:]))]
288285

@@ -427,9 +424,7 @@ def list_ignored(self):
427424
def compare_geometric_mean(self, all_results):
428425
# use a list since two filenames can be identical,
429426
# even if results are different
430-
all_norm_means = []
431-
for item in all_results[0]:
432-
all_norm_means.append((item.changed.name, []))
427+
all_norm_means = [(item.changed.name, []) for item in all_results[0]]
433428

434429
for results in all_results:
435430
for index, result in enumerate(results):

pyperf/_cpu_utils.py

+1-2
Original file line numberDiff line numberDiff line change
@@ -90,8 +90,7 @@ def parse_cpu_list(cpu_list):
9090
parts = part.split('-', 1)
9191
first = int(parts[0])
9292
last = int(parts[1])
93-
for cpu in range(first, last + 1):
94-
cpus.append(cpu)
93+
cpus.extend(range(first, last + 1))
9594
else:
9695
cpus.append(int(part))
9796
cpus.sort()

pyperf/_utils.py

+1-2
Original file line numberDiff line numberDiff line change
@@ -129,8 +129,7 @@ def parse_run_list(run_list):
129129
parts = part.split('-', 1)
130130
first = int(parts[0])
131131
last = int(parts[1])
132-
for run in range(first, last + 1):
133-
runs.append(run)
132+
runs.extend(range(first, last + 1))
134133
else:
135134
runs.append(int(part))
136135
except ValueError:

pyperf/tests/test_bench.py

+3-5
Original file line numberDiff line numberDiff line change
@@ -346,11 +346,9 @@ def test_remove_all_metadata(self):
346346
{'name': 'bench', 'unit': 'byte'})
347347

348348
def test_update_metadata(self):
349-
runs = []
350-
for value in (1.0, 2.0, 3.0):
351-
runs.append(pyperf.Run((value,),
352-
metadata={'name': 'bench'},
353-
collect_metadata=False))
349+
runs = [pyperf.Run((value,),
350+
metadata={'name': 'bench'},
351+
collect_metadata=False) for value in (1.0, 2.0, 3.0)]
354352
bench = pyperf.Benchmark(runs)
355353
self.assertEqual(bench.get_metadata(),
356354
{'name': 'bench'})

0 commit comments

Comments (0)