#!/usr/bin/env python3
"""
Helper script to extract the data from *.dat result files and aggregate it
into summary CSV tables.
"""
import argparse
import csv
import glob
import os
import re
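
# A hedged sketch of the expected input, inferred from the parsing code below
# rather than from an actual file: each results*.dat / timings*.dat file holds
# comma-separated key,value rows, with the sweep parameters first, e.g.
#
#   rw_delta, 2
#   ww_delta, 2
#   alt_barr, True
#   num_events, 1234
#   ...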

# Metric keys we care about from results*.dat files.
keys = [
    'num_events',
    'num_edges',
    'num_ops',
    'num_read',
    'num_writes',
    'num_time_edges',
    'num_races',
    'num_harmful',
    'num_commute',
    'num_covered',
    'num_time_filtered_races',
    'num_pkts',
    'num_per_pkt_races',
    'num_per_pkt_inconsistent',
    'num_per_pkt_inconsistent_covered',
    'num_per_pkt_entry_version_race',
    'num_per_pkt_inconsistent_no_repeat',
    'num_versions',
    'num_racing_versions',
]

# Columns for the per-file table: sweep parameters followed by the metrics.
table_keys = ['rw_delta', 'ww_delta', 'alt_barr', 'data_deps'] + keys

# Timing keys we care about from timings*.dat files.
timing_keys = [
    'total_time_sec',
    'load_time_sec',
    'detect_races_time_sec',
    'extract_traces_time_sec',
    'find_reactive_cmds_time_sec',
    'find_proactive_cmds_time_sec',
    'find_covered_races_time',
    'per_packet_inconsistent_time_sec',
    'find_inconsistent_update_time_sec',
]

timing_table_keys = ['rw_delta', 'ww_delta', 'alt_barr', 'data_deps'] + timing_keys

# Keys that may appear in the input files but are not metrics themselves.
okay_to_ignore = ['key', 'rw_delta', 'ww_delta', 'alt_barr', 'data_deps']


def natural_keys(text):
    """Sort key that orders embedded numbers numerically, e.g. 't2' before 't10'."""
    def atoi(text):
        return int(text) if text.isdigit() else text
    return [atoi(c) for c in re.split(r'(\d+)', text)]
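
# For example (hypothetical labels):
#   sorted(['True-10', 'True-2'], key=natural_keys) -> ['True-2', 'True-10']
# whereas plain lexicographic sorting would put 'True-10' first.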


def read_dat_files(file_keys, infiles):
    table = {}
    for key in file_keys:
        table[key] = {}
    for fname in infiles:
        print("READING FILE", fname)
        with open(fname, 'r') as csvfile:
            data = csv.reader(csvfile, delimiter=',')
            t = None
            ab = None
            for row in data:
                key, value = row[0].strip(), row[1].strip()
                if key == 'alt_barr':
                    ab = value
                if key in ('rw_delta', 'ww_delta'):
                    if value != 'inf':
                        value = int(value)
                    if t is None:
                        t = value
                    else:
                        # TODO(jm): This is a really weird way of checking that
                        # rw_delta == ww_delta. We should make t a tuple
                        # (t_rw, t_ww) and print that out. If this assertion
                        # fails, you might need to delete any *.dat files
                        # remaining from previous runs where rw_delta != ww_delta.
                        assert t == value
                if key in table:
                    table[key][str(ab) + '-' + str(t)] = value
                elif key not in okay_to_ignore:
                    print("Ignored row", row)
    return table
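
# The structure returned above looks like (hypothetical values):
#   {'num_events': {'True-2': 1234, 'True-10': 2345, ...}, ...}
# i.e. one dict per metric key, indexed by '<alt_barr>-<delta>' labels.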


def save_to_csv(file_keys, table, outname):
    print("Saving results to", outname)
    with open(outname, 'w', newline='') as f:
        wr = csv.writer(f, delimiter=',')
        # All metrics share the same column labels, so build the header row
        # from any one entry.
        for key, values in table.items():
            row = ['key/t'] + sorted(values.keys(), key=natural_keys)
            wr.writerow(row)
            break
        for key in file_keys:
            values = table[key]
            times = sorted(values.keys(), key=natural_keys)
            row = [key] + [values[t] for t in times]
            wr.writerow(row)
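
# The resulting CSV then looks like (hypothetical values):
#   key/t,True-2,True-10
#   num_events,1234,2345
#   ...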


def read_dat_files_table(file_keys, infiles):
    table = []
    for fname in infiles:
        print("READING FILE ALT", fname)
        loaded = {}
        with open(fname, 'r') as csvfile:
            data = csv.reader(csvfile, delimiter=',')
            t = None
            for row in data:
                key, value = row[0].strip(), row[1].strip()
                if key in ('rw_delta', 'ww_delta'):
                    if value != 'inf':
                        value = int(value)
                    if t is None:
                        t = value
                    else:
                        # TODO(jm): same weird rw_delta == ww_delta check as in
                        # read_dat_files; see the comment there.
                        assert t == value
                loaded[key] = value
        row = [loaded[k] for k in file_keys]
        table.append(row)
    table.sort(key=lambda x: x[0])
    table.insert(0, file_keys)
    return table
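
# read_dat_files_table returns one row per input file, sorted by the first
# column (rw_delta), with a header row prepended, e.g. (hypothetical values):
#   [['rw_delta', 'ww_delta', 'alt_barr', 'data_deps', 'num_events', ...],
#    [2, 2, 'True', 'True', 1234, ...]]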


def save_to_csv_table(table, outname):
    print("Saving results to", outname)
    with open(outname, 'w', newline='') as f:
        wr = csv.writer(f, delimiter=',')
        wr.writerows(table)
    return table


def main(result_dir):
    # Read the per-run *.dat files.
    table = read_dat_files(keys, glob.glob(os.path.join(result_dir, 'results*.dat')))
    timings_table = read_dat_files(timing_keys, glob.glob(os.path.join(result_dir, 'timings*.dat')))
    table_tbl = read_dat_files_table(table_keys, glob.glob(os.path.join(result_dir, 'results*.dat')))
    timings_tbl = read_dat_files_table(timing_table_keys, glob.glob(os.path.join(result_dir, 'timings*.dat')))
    # Save the metric summary.
    save_to_csv(keys, table, os.path.join(result_dir, 'summary.csv'))
    # Save the timing summary.
    save_to_csv(timing_keys, timings_table,
                os.path.join(result_dir, 'summary_timings.csv'))
    # Save the per-file tables.
    save_to_csv_table(table_tbl, os.path.join(result_dir, 'summary_tbl.csv'))
    save_to_csv_table(timings_tbl, os.path.join(result_dir, 'summary_timings_tbl.csv'))


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Helper script to extract the data from *.dat files and '
                    'generate four csv files: summary.csv, summary_timings.csv, '
                    'summary_tbl.csv, and summary_timings_tbl.csv')
    parser.add_argument('result_dir',
                        help='Directory where [results|timings]*.dat files are located')
    args = parser.parse_args()
    main(args.result_dir)
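
# Example invocation (hypothetical path):
#   ./format_results.py ./results
# writes summary.csv, summary_timings.csv, summary_tbl.csv, and
# summary_timings_tbl.csv into ./results.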