-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathrun_benchmarks.py
More file actions
124 lines (101 loc) · 4.07 KB
/
run_benchmarks.py
File metadata and controls
124 lines (101 loc) · 4.07 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
import os
import sqlite3
import subprocess
import sys
DATABASE_PATH = 'benchmarks.db'
BENCHMARKS_DIR = 'benchmarks'
def create_connection(db_path=None):
    """Create and return a connection to the SQLite database.

    Args:
        db_path: Optional path to the database file. When None (the
            default, preserving the original call signature), the
            module-level DATABASE_PATH constant is used.

    Returns:
        An open sqlite3.Connection, or None if the connection failed.
    """
    conn = None
    try:
        conn = sqlite3.connect(db_path if db_path is not None else DATABASE_PATH)
        # NOTE: sqlite3.version (the module version) is deprecated since
        # Python 3.12 and removed in 3.14; report the underlying SQLite
        # library version instead.
        print(f"Connected to SQLite version: {sqlite3.sqlite_version}")
    except sqlite3.Error as e:
        print(e)
    return conn
def insert_benchmark(conn, name, category, description, script_path):
    """Insert one row into the benchmarks table.

    Args:
        conn: Open sqlite3 connection.
        name: Benchmark name.
        category: Benchmark category.
        description: Free-form description text.
        script_path: Path of the script that produced the benchmark.

    Returns:
        The rowid of the inserted row, or None if the insert failed.
    """
    sql = '''INSERT INTO benchmarks(name, category, description, script_path)
             VALUES(?,?,?,?) '''
    try:
        cursor = conn.cursor()
        cursor.execute(sql, (name, category, description, script_path))
        conn.commit()
    except sqlite3.Error as e:
        print(f"Error inserting benchmark: {e}")
        return None
    return cursor.lastrowid
def insert_result(conn, benchmark_id, execution_time, output):
    """Insert one benchmark result into the results table.

    Args:
        conn: Open sqlite3 connection.
        benchmark_id: Foreign key into the benchmarks table.
        execution_time: Measured execution time in seconds.
        output: Raw/parsed output text stored alongside the timing.

    Returns:
        The rowid of the inserted row, or None if the insert failed.
    """
    sql = '''INSERT INTO results(benchmark_id, execution_time, output)
             VALUES(?,?,?) '''
    # Consistency fix: mirror insert_benchmark's error handling so a single
    # failed insert (e.g. missing table) no longer aborts the whole run.
    try:
        cur = conn.cursor()
        cur.execute(sql, (benchmark_id, execution_time, output))
        conn.commit()
        return cur.lastrowid
    except sqlite3.Error as e:
        print(f"Error inserting result: {e}")
        return None
def run_benchmark(script_path):
    """Run a benchmark script in a subprocess and return its stdout.

    Args:
        script_path: Path to the Python script to execute.

    Returns:
        The captured stdout as a string, or None if the script exited
        with a non-zero status.
    """
    try:
        # Portability fix: use the current interpreter instead of a
        # hard-coded 'python3', which is absent on Windows and may point
        # at a different interpreter than the one running this script.
        process = subprocess.run(
            [sys.executable, script_path],
            capture_output=True, text=True, check=True)
        return process.stdout
    except subprocess.CalledProcessError as e:
        print(f"Error running benchmark {script_path}: {e}")
        return None
def parse_benchmark_output(output):
    """Parse benchmark stdout into a list of per-size result dicts.

    Lines that are blank or begin with '-', 'Benchmarking', or 'Size'
    are treated as headers and skipped. Each remaining line must hold at
    least four whitespace-separated fields: size, list time, tuple time,
    and a ratio (optionally suffixed with 'x'). Unparseable lines are
    reported and skipped.

    Args:
        output: Raw stdout text from a benchmark script.

    Returns:
        A list of dicts with keys 'Size', 'List (s)', 'Tuple (s)', 'Ratio'.
    """
    parsed = []
    header_prefixes = ('-', 'Benchmarking', 'Size')
    for raw in output.splitlines():
        row = raw.strip()
        # Skip blanks and headers; prefix check is on the unstripped line.
        if not row or raw.startswith(header_prefixes):
            continue
        try:
            fields = row.split()
            if len(fields) >= 4:
                parsed.append({
                    'Size': int(fields[0]),
                    'List (s)': float(fields[1]),
                    'Tuple (s)': float(fields[2]),
                    'Ratio': float(fields[3].replace('x', '')),
                })
        except (ValueError, IndexError) as e:
            print(f"Warning: Could not parse line: {row} - {e}")
            continue
    return parsed
def find_benchmark_scripts(benchmarks_dir):
    """Recursively collect every Python script under a directory.

    Args:
        benchmarks_dir: Root directory to search.

    Returns:
        A list of paths (joined with os.path.join) of all files whose
        name ends in '.py', in os.walk order.
    """
    return [
        os.path.join(dirpath, filename)
        for dirpath, _, filenames in os.walk(benchmarks_dir)
        for filename in filenames
        if filename.endswith('.py')
    ]
if __name__ == '__main__':
    conn = create_connection()
    if conn:
        try:
            scripts = find_benchmark_scripts(BENCHMARKS_DIR)
            for script in scripts:
                print(f"Running benchmark: {script}")
                output = run_benchmark(script)
                if not output:
                    continue
                # Derive name/category from the path, expected layout:
                # benchmarks/<category>/<name>.py
                parts = script.split(os.sep)
                if len(parts) >= 3:
                    category = parts[1]
                    name = parts[-1].replace('.py', '')
                else:
                    category = 'Unknown'
                    name = script  # Use the full path as the name if parsing fails
                # Insert benchmark metadata into the database.
                benchmark_id = insert_benchmark(conn, name, category, "", script)
                if benchmark_id is None:
                    # Bug fix: previously a failed metadata insert still led to
                    # result rows being stored with a NULL benchmark_id.
                    print(f"Skipping results for {script}: metadata insert failed")
                    continue
                # Parse the output and store one row per result line.
                for result in parse_benchmark_output(output):
                    # Average of the list and tuple timings for this size.
                    execution_time = (
                        result['List (s)'] + result['Tuple (s)']) / 2
                    insert_result(conn, benchmark_id,
                                  execution_time, str(result))
        finally:
            # Bug fix: guarantee the connection is released even if a
            # benchmark run raises partway through.
            conn.close()
        print("Benchmarks completed and results stored in the database.")