Skip to content

Commit 8dc7bdd

Browse files
committed
test(perf): add benchmark tests for core operations
Adds micro-benchmarks for Job.from_dict, JobRequest.to_dict, JSON serialization round-trips, and JobState.is_terminal to track deserialization performance regressions.
1 parent f6a05eb commit 8dc7bdd

1 file changed

Lines changed: 150 additions & 0 deletions

File tree

tests/test_benchmark.py

Lines changed: 150 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,150 @@
1+
"""Benchmark tests for OJS Python SDK core operations.
2+
3+
Run with: pytest tests/test_benchmark.py -v
4+
"""
5+
6+
import json
7+
import time
8+
9+
import pytest
10+
11+
from ojs.job import Job, JobRequest, JobState
12+
13+
14+
# Smallest job payload exercised by the deserialization benchmarks below.
MINIMAL_JOB_DICT = dict(
    id="019539a4-b68c-7def-8000-1a2b3c4d5e6f",
    type="email.send",
    state="available",
    queue="default",
    args=[{"to": "user@example.com"}],
    attempt=0,
    created_at="2024-01-15T10:30:00Z",
)
23+
24+
# Fully-populated job payload (optional fields included) for the benchmarks below.
FULL_JOB_DICT = dict(
    id="019539a4-b68c-7def-8000-1a2b3c4d5e6f",
    type="email.send",
    state="active",
    queue="email",
    args=[{"to": "user@example.com", "subject": "Welcome!"}],
    priority=10,
    attempt=2,
    max_retries=5,
    tags=["onboarding", "email"],
    meta={"campaign_id": "123", "source": "api"},
    created_at="2024-01-15T10:30:00Z",
    scheduled_at="2024-01-15T11:00:00Z",
)
38+
39+
40+
class TestJobDeserialization:
41+
"""Benchmark Job.from_dict() for various payload sizes."""
42+
43+
def test_bench_from_dict_minimal(self) -> None:
44+
iterations = 10_000
45+
start = time.perf_counter()
46+
for _ in range(iterations):
47+
Job.from_dict(MINIMAL_JOB_DICT)
48+
elapsed = time.perf_counter() - start
49+
per_op = elapsed / iterations * 1_000_000 # microseconds
50+
print(f"\n Job.from_dict (minimal): {per_op:.2f} µs/op ({iterations} iterations)")
51+
assert per_op < 1000, f"Too slow: {per_op:.2f} µs/op"
52+
53+
def test_bench_from_dict_full(self) -> None:
54+
iterations = 10_000
55+
start = time.perf_counter()
56+
for _ in range(iterations):
57+
Job.from_dict(FULL_JOB_DICT)
58+
elapsed = time.perf_counter() - start
59+
per_op = elapsed / iterations * 1_000_000
60+
print(f"\n Job.from_dict (full): {per_op:.2f} µs/op ({iterations} iterations)")
61+
assert per_op < 1000, f"Too slow: {per_op:.2f} µs/op"
62+
63+
64+
class TestJobRequestSerialization:
65+
"""Benchmark JobRequest.to_dict() for various payload sizes."""
66+
67+
def test_bench_to_dict_minimal(self) -> None:
68+
req = JobRequest(type="email.send", args=["user@example.com"])
69+
iterations = 10_000
70+
start = time.perf_counter()
71+
for _ in range(iterations):
72+
req.to_dict()
73+
elapsed = time.perf_counter() - start
74+
per_op = elapsed / iterations * 1_000_000
75+
print(f"\n JobRequest.to_dict (minimal): {per_op:.2f} µs/op ({iterations} iterations)")
76+
assert per_op < 1000, f"Too slow: {per_op:.2f} µs/op"
77+
78+
def test_bench_to_dict_full(self) -> None:
79+
req = JobRequest(
80+
type="email.send",
81+
args=["user@example.com", "Welcome!"],
82+
queue="email",
83+
priority=10,
84+
tags=["onboarding"],
85+
)
86+
iterations = 10_000
87+
start = time.perf_counter()
88+
for _ in range(iterations):
89+
req.to_dict()
90+
elapsed = time.perf_counter() - start
91+
per_op = elapsed / iterations * 1_000_000
92+
print(f"\n JobRequest.to_dict (full): {per_op:.2f} µs/op ({iterations} iterations)")
93+
assert per_op < 1000, f"Too slow: {per_op:.2f} µs/op"
94+
95+
96+
class TestJsonSerialization:
97+
"""Benchmark raw JSON serialization round-trips."""
98+
99+
def test_bench_json_dumps_minimal(self) -> None:
100+
iterations = 10_000
101+
start = time.perf_counter()
102+
for _ in range(iterations):
103+
json.dumps(MINIMAL_JOB_DICT)
104+
elapsed = time.perf_counter() - start
105+
per_op = elapsed / iterations * 1_000_000
106+
print(f"\n json.dumps (minimal): {per_op:.2f} µs/op ({iterations} iterations)")
107+
108+
def test_bench_json_dumps_full(self) -> None:
109+
iterations = 10_000
110+
start = time.perf_counter()
111+
for _ in range(iterations):
112+
json.dumps(FULL_JOB_DICT)
113+
elapsed = time.perf_counter() - start
114+
per_op = elapsed / iterations * 1_000_000
115+
print(f"\n json.dumps (full): {per_op:.2f} µs/op ({iterations} iterations)")
116+
117+
def test_bench_json_loads_minimal(self) -> None:
118+
data = json.dumps(MINIMAL_JOB_DICT)
119+
iterations = 10_000
120+
start = time.perf_counter()
121+
for _ in range(iterations):
122+
json.loads(data)
123+
elapsed = time.perf_counter() - start
124+
per_op = elapsed / iterations * 1_000_000
125+
print(f"\n json.loads (minimal): {per_op:.2f} µs/op ({iterations} iterations)")
126+
127+
def test_bench_json_loads_full(self) -> None:
128+
data = json.dumps(FULL_JOB_DICT)
129+
iterations = 10_000
130+
start = time.perf_counter()
131+
for _ in range(iterations):
132+
json.loads(data)
133+
elapsed = time.perf_counter() - start
134+
per_op = elapsed / iterations * 1_000_000
135+
print(f"\n json.loads (full): {per_op:.2f} µs/op ({iterations} iterations)")
136+
137+
138+
class TestJobStateOperations:
    """Benchmark state-related operations."""

    def test_bench_is_terminal(self) -> None:
        """Measure the per-call cost of JobState.is_terminal across all states."""
        # Visit every enum member on each pass so the mean covers all states.
        all_states = list(JobState)
        passes = 100_000
        t0 = time.perf_counter()
        for _ in range(passes):
            for state in all_states:
                state.is_terminal()
        total = time.perf_counter() - t0
        calls = passes * len(all_states)
        per_op = total / calls * 1_000_000  # seconds -> microseconds per call
        print(f"\n JobState.is_terminal: {per_op:.2f} µs/op ({calls} iterations)")

0 commit comments

Comments
 (0)