#!/usr/bin/env python3
"""
Simple ByteMLPerf Benchmark Runner
==================================
Runs all workloads in byte_micro_perf and outputs a CSV with workload names and latencies.
Clean, minimal implementation with no extra features.
Usage: python simple_benchmark.py
Output: benchmark_results.csv
"""
import sys
import shutil
import subprocess
import pathlib
import json
import csv
from typing import List, Dict

def find_all_workloads() -> List[pathlib.Path]:
    """Find all .json workload files in byte_micro_perf/workloads/"""
    workloads_dir = pathlib.Path("byte_micro_perf/workloads")
    if not workloads_dir.exists():
        print(f"Error: {workloads_dir} not found")
        sys.exit(1)
    json_files = list(workloads_dir.rglob("*.json"))
    return sorted(json_files)

def run_single_workload(workload_file: pathlib.Path) -> Dict:
    """Run a single workload and return the result"""
    workload_name = workload_file.stem
    task_dir = workload_file.parent

    print(f"Running {workload_name}...")

    # Run the benchmark
    cmd = [
        sys.executable, "byte_micro_perf/launch.py",
        "--hardware_type", "GPU",
        "--task_dir", str(task_dir),
        "--task", workload_name,
        "--report_dir", "temp_results",
        "--log_level", "ERROR"  # Suppress verbose output
    ]
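
    # For reference, the list above corresponds to a shell invocation like the
    # following (<dir> and <workload> are placeholders, not real names):
    #   python byte_micro_perf/launch.py --hardware_type GPU \
    #       --task_dir byte_micro_perf/workloads/<dir> --task <workload> \
    #       --report_dir temp_results --log_level ERROR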

    try:
        result = subprocess.run(cmd, capture_output=True, text=True, timeout=300)
        if result.returncode != 0:
            return {"workload": workload_name, "latency_us": "FAILED", "error": result.stderr}
    except subprocess.TimeoutExpired:
        return {"workload": workload_name, "latency_us": "TIMEOUT", "error": "Execution timeout"}
    except Exception as e:
        return {"workload": workload_name, "latency_us": "ERROR", "error": str(e)}

    # Parse results to get median latency; a non-numeric sentinel (e.g.
    # "NO_RESULTS") is recorded as an error so it is not counted as a success.
    latency = extract_latency_from_results(workload_name)
    try:
        float(latency)
        error = ""
    except ValueError:
        error = latency
    return {"workload": workload_name, "latency_us": latency, "error": error}

def extract_latency_from_results(workload_name: str) -> str:
    """Extract median latency from benchmark results"""
    try:
        # Look for result files
        results_dir = pathlib.Path("temp_results")
        jsonl_files = list(results_dir.rglob("*.jsonl"))
        if not jsonl_files:
            return "NO_RESULTS"

        # Read the first jsonl file and collect latencies
        latencies = []
        with open(jsonl_files[0], 'r') as f:
            for line in f:
                if line.strip():
                    data = json.loads(line)
                    if 'targets' in data and 'latency(us)' in data['targets']:
                        latencies.append(float(data['targets']['latency(us)']))

        if latencies:
            # Return the middle element (the upper median for even-length lists)
            latencies.sort()
            median_idx = len(latencies) // 2
            return f"{latencies[median_idx]:.3f}"
        else:
            return "NO_LATENCY_DATA"
    except Exception as e:
        return f"PARSE_ERROR: {str(e)}"

def cleanup_temp_results():
    """Remove temporary results directory"""
    temp_dir = pathlib.Path("temp_results")
    if temp_dir.exists():
        shutil.rmtree(temp_dir)

def main():
    """Main execution function"""
    print("ByteMLPerf Simple Benchmark Runner")
    print("=" * 40)

    # Find all workloads
    workloads = find_all_workloads()
    print(f"Found {len(workloads)} workloads")

    # Prepare results
    results = []

    # Run each workload
    for i, workload_file in enumerate(workloads, 1):
        print(f"[{i}/{len(workloads)}] ", end="")

        # Clean up previous results
        cleanup_temp_results()

        # Run workload
        result = run_single_workload(workload_file)
        results.append(result)

        # Show result
        if result["error"]:
            print(f"❌ {result['workload']}: {result['latency_us']}")
        else:
            print(f"✅ {result['workload']}: {result['latency_us']} μs")

    # Clean up final temp results
    cleanup_temp_results()

    # Write CSV output
    output_file = "benchmark_results.csv"
    with open(output_file, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(["workload", "latency_us"])
        for result in results:
            writer.writerow([result["workload"], result["latency_us"]])

    # Summary
    print("\n" + "=" * 40)
    print(f"Results saved to: {output_file}")
    successful = len([r for r in results if not r["error"]])
    failed = len(results) - successful
    print(f"Successful: {successful}, Failed: {failed}")

    if failed > 0:
        print("\nFailed workloads:")
        for result in results:
            if result["error"]:
                print(f"  - {result['workload']}: {result['latency_us']}")

if __name__ == "__main__":
    main()
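
# Example of the resulting benchmark_results.csv (workload names and values
# here are illustrative, not taken from an actual run):
#   workload,latency_us
#   gemm,123.456
#   all_reduce,TIMEOUT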