Skip to content

Commit d2ff49f

Browse files
authored
Merge pull request #42 from MunchLab/benchmarks
Add benchmark workflow for ECT package and benchmark scripts
2 parents 4d65b0a + 6b4afa4 commit d2ff49f

6 files changed

Lines changed: 225 additions & 0 deletions

File tree

.github/workflows/benchmark.yml

Lines changed: 40 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,40 @@
name: Benchmark

on:
  push:
    branches: [ "main" ]
    paths:
      - '**.py'
      - 'benchmarks/**'

jobs:
  benchmark:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'

      - name: Install uv
        run: |
          curl -LsSf https://astral.sh/uv/install.sh | sh

      - name: Create venv and install dependencies
        run: |
          uv venv
          source .venv/bin/activate
          uv pip install -e .
          uv pip install numpy matplotlib

      - name: Run benchmarks
        run: |
          source .venv/bin/activate
          python benchmarks/run_benchmarks.py

      - name: Store benchmark results
        # v3 of upload-artifact is deprecated and has been disabled by
        # GitHub; v4 is the supported version.
        uses: actions/upload-artifact@v4
        with:
          name: benchmark-results
          # run_benchmarks.py's save_results() writes to benchmarks/results/
          # (its default output_dir), not benchmark_results/ — upload the
          # directory that is actually produced so the artifact is not empty.
          path: benchmarks/results/

.gitignore

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,8 @@
33
doc_source/notebooks/Matisse/outlines/*
44
*.DS_Store
55

6+
benchmarks/results/*
7+
68
# Byte-compiled / optimized / DLL files
79
__pycache__/
810
*.py[cod]

Makefile

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,9 @@ html:
2929
sphinx-build -M html doc_source docs
3030
rsync -a docs/html/ docs/
3131
rm -r docs/html
32+
33+
# Run the full benchmark suite.
benchmark:
	python benchmarks/run_benchmarks.py
3235

3336
all:
3437
# Running autopep8

benchmarks/benchmark_cw.py

Lines changed: 53 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,53 @@
1+
"""Benchmarks for CW complex computations"""
2+
import numpy as np
3+
import time
4+
from ect import ECT, EmbeddedCW, create_example_cw
5+
import json
6+
from pathlib import Path
7+
8+
9+
def benchmark_cw_ect(num_runs=5):
    """Benchmark ECT computation on CW complexes.

    Times ``ECT(...).calculateECT`` on the example CW complex for several
    (directions, thresholds) grid sizes.

    Parameters
    ----------
    num_runs : int
        Number of repeated timings per configuration.

    Returns
    -------
    dict
        Maps ``'dirs_<d>_thresh_<t>'`` to mean/std/min/max wall-clock
        times in seconds.
    """
    # (num_dirs, num_thresh) grids: small, medium, large
    configs = [(8, 10), (36, 36), (360, 360)]

    results = {}
    for num_dir, num_thresh in configs:
        K = create_example_cw()

        print(
            f"\nTesting ECT with {num_dir} directions, {num_thresh} thresholds")

        times = []
        for _ in range(num_runs):
            start_time = time.time()
            # Timing includes ECT construction as well as the computation.
            myect = ECT(num_dirs=num_dir, num_thresh=num_thresh)
            myect.calculateECT(K)
            times.append(time.time() - start_time)

        results[f'dirs_{num_dir}_thresh_{num_thresh}'] = {
            'mean_time': float(np.mean(times)),
            'std_time': float(np.std(times)),
            'min_time': float(np.min(times)),
            'max_time': float(np.max(times)),
        }

    return results
42+
43+
44+
if __name__ == "__main__":
    print("Running CW complex benchmarks...")
    results = benchmark_cw_ect()

    # Persist timings as JSON for later inspection.
    out_dir = Path("benchmark_results")
    out_dir.mkdir(exist_ok=True)
    with open(out_dir / "cw_results.json", "w") as fh:
        json.dump(results, fh, indent=2)

benchmarks/benchmark_graph.py

Lines changed: 79 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,79 @@
1+
"""Benchmarks for graph-based ECT computations"""
2+
import numpy as np
3+
import time
4+
from ect import ECT, EmbeddedGraph
5+
6+
7+
def create_test_shape(num_points=1000, complexity=1):
    """Generate a closed 2D test curve with tunable complexity.

    Starts from the unit circle and adds ``complexity`` higher-frequency
    harmonics (frequencies 2..complexity+1) with amplitude 1/frequency.

    Parameters
    ----------
    num_points : int
        Number of sample points along the curve.
    complexity : int
        Number of harmonics layered on top of the base circle.

    Returns
    -------
    numpy.ndarray
        Array of shape ``(num_points, 2)`` holding x/y coordinates.
    """
    t = np.linspace(0, 2 * np.pi, num_points)
    # Row 0 is x, row 1 is y; accumulate harmonics on both at once.
    coords = np.stack([np.cos(t), np.sin(t)])
    for k in range(2, complexity + 2):
        coords = coords + (1 / k) * np.stack([np.cos(k * t), np.sin(k * t)])
    return coords.T
18+
19+
20+
def benchmark_graph_ect(num_runs=5):
    """Benchmark the full ECT computation on embedded graphs.

    Builds cycle graphs from test shapes of varying size and complexity
    and times ``ECT(num_dirs=360, num_thresh=360).calculateECT`` on each.

    Parameters
    ----------
    num_runs : int
        Number of repeated timings per configuration.

    Returns
    -------
    dict
        Maps ``'points_<p>_complexity_<c>'`` to mean/std/min/max
        wall-clock times in seconds.
    """
    # (num_points, complexity) pairs to exercise
    configs = [
        (100, 1),
        (1000, 1),
        (100, 3),
        (1000, 3),
        (10000, 3),
    ]

    results = {}
    for points, complexity in configs:
        shape = create_test_shape(points, complexity)
        G = EmbeddedGraph()
        G.add_cycle(shape)

        print(
            f"\nTesting shape with {points} points and complexity {complexity}")

        times = []
        for _ in range(num_runs):
            start_time = time.time()
            myect = ECT(num_dirs=360, num_thresh=360)
            myect.calculateECT(G)
            times.append(time.time() - start_time)

        results[f'points_{points}_complexity_{complexity}'] = {
            'mean_time': float(np.mean(times)),
            'std_time': float(np.std(times)),
            'min_time': float(np.min(times)),
            'max_time': float(np.max(times)),
        }

    return results
55+
56+
57+
def benchmark_g_omega(num_runs=5):
    """Benchmark the g_omega direction sweep on embedded graphs.

    For each graph size, times a full sweep of 360 ``G.g_omega(theta)``
    calls over directions evenly spaced in [0, 2*pi].

    Parameters
    ----------
    num_runs : int
        Number of repeated timings per graph size.

    Returns
    -------
    dict
        Maps ``'size_<n>'`` to mean/std wall-clock times for one sweep.
    """
    results = {}
    for size in (100, 500, 1000):
        G = EmbeddedGraph()
        G.add_cycle(create_test_shape(size))

        times = []
        for _ in range(num_runs):
            start_time = time.time()
            for theta in np.linspace(0, 2 * np.pi, 360):
                G.g_omega(theta)
            times.append(time.time() - start_time)

        results[f'size_{size}'] = {
            'mean_time': float(np.mean(times)),
            'std_time': float(np.std(times)),
        }

    return results

benchmarks/run_benchmarks.py

Lines changed: 48 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,48 @@
1+
"""Main benchmark runner for ECT package"""
2+
import numpy as np
3+
import time
4+
from pathlib import Path
5+
import json
6+
from benchmark_graph import benchmark_graph_ect, benchmark_g_omega
7+
from benchmark_cw import benchmark_cw_ect
8+
import platform
9+
10+
11+
def run_all_benchmarks(num_runs=5):
    """Run every benchmark suite and bundle results with run metadata.

    Parameters
    ----------
    num_runs : int
        Number of repeated timings passed through to each benchmark.

    Returns
    -------
    dict
        ``{'metadata': {...}, 'benchmarks': {'graph_ect': ...,
        'cw_ect': ..., 'g_omega': ...}}``
    """
    # Record enough context to compare runs across machines/versions.
    metadata = {
        'num_runs': num_runs,
        'timestamp': time.strftime('%Y-%m-%d %H:%M:%S'),
        'platform': platform.platform(),
        'python_version': platform.python_version(),
    }

    benchmarks = {}

    print("\nRunning graph ECT benchmarks...")
    benchmarks['graph_ect'] = benchmark_graph_ect(num_runs=num_runs)

    print("\nRunning CW complex benchmarks...")
    benchmarks['cw_ect'] = benchmark_cw_ect(num_runs=num_runs)

    print("\nRunning g_omega benchmarks...")
    benchmarks['g_omega'] = benchmark_g_omega(num_runs=num_runs)

    return {'metadata': metadata, 'benchmarks': benchmarks}
33+
34+
35+
def save_results(results, output_dir="benchmarks/results"):
    """Save benchmark results as JSON inside *output_dir*.

    Parameters
    ----------
    results : dict
        JSON-serializable benchmark results (see ``run_all_benchmarks``).
    output_dir : str or Path
        Directory to write ``benchmark_results.json`` into. Created,
        including missing parent directories, if it does not exist.
    """
    output_dir = Path(output_dir)
    # parents=True: the default "benchmarks/results" is a nested path, and
    # mkdir without it raises FileNotFoundError whenever "benchmarks/" is
    # absent (e.g. running from a different working directory).
    output_dir.mkdir(parents=True, exist_ok=True)

    with open(output_dir / "benchmark_results.json", "w") as f:
        json.dump(results, f, indent=2)

    print(f"\nResults saved to {output_dir}/benchmark_results.json")
44+
45+
46+
if __name__ == "__main__":
    # Run every suite with defaults and persist the combined results.
    save_results(run_all_benchmarks())

0 commit comments

Comments
 (0)