diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a402b70..22a0944 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -46,8 +46,11 @@ jobs: with: python-version: "3.12" + - name: Install dependencies + run: uv sync --all-extras --group dev + - name: Type check - run: uvx mypy --strict protest + run: uv run mypy protest test: needs: lint @@ -103,7 +106,7 @@ jobs: files: coverage.xml fail_ci_if_error: false -c docs: + docs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v6 diff --git a/docs/core-concepts/console.md b/docs/core-concepts/console.md new file mode 100644 index 0000000..b172246 --- /dev/null +++ b/docs/core-concepts/console.md @@ -0,0 +1,49 @@ +# Console Output + +Print progress and debug messages that bypass test capture. + +## The Problem + +`print()` inside tests and fixtures is captured by ProTest. During long-running fixtures (pipeline imports, graph seeding), there's no visible feedback. + +## `console.print` + +```python +from protest import console + +@fixture() +async def pipeline(): + for i, scene in enumerate(scenes): + console.print(f"[cyan]pipeline:[/] importing {scene.name} ({i+1}/{len(scenes)})") + await import_scene(scene) + return driver +``` + +Messages appear inline in the reporter output, between test results. + +## Rich Markup + +`console.print` supports Rich markup. The Rich reporter renders colors; the ASCII reporter strips tags. + +```python +console.print(f"[bold green]done[/] in {duration:.1f}s") +console.print(f"[yellow]warning:[/] slow query ({elapsed:.2f}s)") +``` + +## Raw Mode + +Skip markup processing with `raw=True`: + +```python +console.print("debug: raw bytes here", raw=True) +``` + +The message is passed as-is to both reporters. + +## How It Works + +`console.print` sends a `USER_PRINT` event through the event bus. The reporter receives it and writes to the real stdout (bypassing test capture). 
This means: + +- Messages appear immediately, not buffered until test end +- Works with `-n 4` (concurrent tests) — the event bus serializes per plugin +- No interference with test capture or `result.output` diff --git a/docs/evals.md b/docs/evals.md new file mode 100644 index 0000000..006c403 --- /dev/null +++ b/docs/evals.md @@ -0,0 +1,555 @@ +# Evals + +Evaluate LLM outputs with scored metrics and historical tracking. + +## What is an Eval? + +A test produces **pass/fail**. An eval produces **scores** — numeric values (0.0–1.0) that measure output quality. Scores are aggregated across cases, tracked over time, and compared between runs. + +ProTest evals use the same infrastructure as tests: fixtures, DI, parallelism, tags. An eval is a test that returns a value, scored by evaluators. + +## Quick Start + +```python +# evals/session.py +from typing import Annotated + +from protest import ForEach, From +from protest.evals import EvalCase, EvalSession, ModelInfo, evaluator +from protest.evals.evaluators import contains_keywords + +cases = ForEach([ + EvalCase(inputs="Who is Marie?", expected="Marie, Resistance", name="lookup"), + EvalCase(inputs="What is 2+2?", expected="4", name="math"), +]) + +session = EvalSession(model=ModelInfo(name="gpt-4o-mini")) + +@session.eval(evaluators=[contains_keywords(keywords=["Marie"])]) +async def chatbot(case: Annotated[EvalCase, From(cases)]) -> str: + return await my_agent(case.inputs) +``` + +```bash +protest eval evals.session:session +``` + +## How It Works + +`@session.eval()` wraps a function to run evaluators on its return value: + +1. Your function receives case data via `ForEach`/`From` (same as parameterized tests) +2. It returns the output (string, object, anything) +3. ProTest passes the output to evaluators → scores +4. Bool verdicts determine pass/fail +5. Aggregated stats appear in the terminal + +The rest of the pipeline — fixtures, DI, parallelism, reporters — works identically to tests. 
+ +## EvalSession + +`EvalSession` is a session configured for evals. History is enabled by default. + +```python +from protest.evals import EvalSession, ModelInfo + +session = EvalSession( + model=ModelInfo(name="gpt-4o-mini"), # tracked in history + concurrency=4, # parallel eval cases + metadata={"version": "1.0"}, # stored in history +) +``` + +## EvalCase + +Typed dataclass for eval case data. Provides IDE autocompletion instead of untyped dicts. + +```python +from protest.evals import EvalCase + +cases = ForEach([ + EvalCase(inputs="What is 2+2?", expected="4", name="math"), + EvalCase(inputs="Who is Napoleon?", expected="emperor, France", name="history"), +]) +``` + +| Field | Type | Description | +|-------|------|-------------| +| `inputs` | `Any` | Input to your task function | +| `expected` | `Any` | Expected output (passed to evaluators as `ctx.expected_output`) | +| `name` | `str` | Case identifier (used in test IDs and history) | +| `evaluators` | `list` | Per-case evaluators (added to suite-level ones) | +| `metadata` | `dict` | Arbitrary metadata | + +## Evaluators + +An evaluator is a function decorated with `@evaluator` that receives an `EvalContext` and returns a verdict. + +### Return Types + +Evaluators return `bool` (simple verdict) or a `dataclass` (structured result). In dataclasses, annotate fields to tell the framework what each one is: + +```python +from typing import Annotated +from protest.evals import Metric, Verdict, Reason +``` + +| Annotation | Role | +|------------|------| +| `Annotated[bool, Verdict]` | Verdict — pass/fail (`all(verdicts)`) | +| `Annotated[float, Metric]` | Metric — aggregated in stats (mean/p50/p95) | +| `Annotated[int, Metric]` | Metric — converted to float | +| `Annotated[str, Reason]` | Reason — displayed on failure, stored in history | + +Unannotated fields are ignored by the runner — free metadata. + +Returning `float`, `dict`, or any other non-dataclass/non-bool type raises `TypeError`. 
+ +### Tracking-Only Evaluators + +A dataclass with `Metric` fields but no `Verdict` is tracking-only. The case always passes for this evaluator — it measures without gating. + +```python +@dataclass +class OverlapMetrics: + overlap: Annotated[float, Metric] + +@evaluator +def word_overlap(ctx: EvalContext) -> OverlapMetrics: + ... +``` + +In the terminal, tracking evaluators show with `·` instead of `✓`/`✗`: + +``` +✓ chatbot[lookup] (1.2s) keyword_recall=0.95 all_present=✓ +· chatbot[lookup] overlap=0.80 +``` + +### Simple Evaluator + +```python +@evaluator +def not_empty(ctx: EvalContext) -> bool: + return bool(ctx.output.strip()) +``` + +### Structured Evaluator + +```python +from dataclasses import dataclass +from typing import Annotated +from protest.evals import Metric, Verdict, Reason + +@dataclass +class KeywordScores: + keyword_recall: Annotated[float, Metric] + all_present: Annotated[bool, Verdict] + detail: Annotated[str, Reason] = "" + +@evaluator +def keyword_check(ctx: EvalContext, keywords: list[str], min_recall: float = 0.5) -> KeywordScores: + found = [k for k in keywords if k.lower() in ctx.output.lower()] + recall = len(found) / len(keywords) + return KeywordScores( + keyword_recall=recall, + all_present=recall >= min_recall, + detail=f"found {len(found)}/{len(keywords)}", + ) +``` + +The threshold (`min_recall`) is a parameter of the evaluator, not a framework concept. The evaluator decides the verdict. 
+ +### Async (LLM Judge) + +Use `ctx.judge()` for structured LLM evaluation (requires `judge=` on `EvalSession`): + +```python +@dataclass +class JudgeResult: + accuracy: Annotated[float, Metric] + accurate_enough: Annotated[bool, Verdict] + reason: Annotated[str, Reason] = "" + +@evaluator +async def llm_judge(ctx: EvalContext, rubric: str = "", min_score: float = 0.7) -> JudgeResult: + return await ctx.judge( + f"Evaluate this response on a 0-1 scale.\n\n" + f"Response: {ctx.output}\nCriteria: {rubric}", + JudgeResult, + ) +``` + +The judge handles structured output — no text parsing needed. See [Judge](#judge) for setup. + +### Per-Case Thresholds + +Different thresholds per case = different evaluator bindings: + +```python +EvalCase(inputs="easy lookup", evaluators=[keyword_check(keywords=["paris"], min_recall=0.9)]), +EvalCase(inputs="hard causal", evaluators=[keyword_check(keywords=["paris"], min_recall=0.3)]), +``` + +### ShortCircuit + +Skip expensive evaluators (LLM judges) when cheap ones already fail: + +```python +from protest.evals import ShortCircuit + +evaluators=[ + not_empty, # always runs + ShortCircuit([ + contains_expected_facts(min_score=0.3), # 0ms — if fail → stop + llm_judge(rubric="factual accuracy"), # 3s — skipped if above fails + ]), +] +``` + +`ShortCircuit` is a group of ordered evaluators. The first `Verdict=False` stops the group. Evaluators outside the `ShortCircuit` always run. 
+ +### Using Evaluators + +```python +# No params → use directly +evaluators=[not_empty] + +# With params → call to bind +evaluators=[keyword_check(keywords=["python", "async"], min_recall=0.75)] + +# Per-case evaluators (added to suite-level) +EvalCase(inputs="...", evaluators=[llm_judge(rubric="Check factual accuracy")]) +``` + +### EvalContext + +| Field / Method | Type | Description | +|----------------|------|-------------| +| `name` | `str` | Case name | +| `inputs` | `I` | Case inputs | +| `output` | `O` | Task return value | +| `expected_output` | `O \| None` | From `EvalCase.expected` | +| `metadata` | `Any` | From `EvalCase.metadata` | +| `duration` | `float` | Task execution time (seconds) | +| `judge(prompt, type)` | `async` | Call the configured LLM judge (see [Judge](#judge)) | +| `judge_call_count` | `int` | Number of judge calls made | + +### Built-in Evaluators + +| Evaluator | Params | Returns | +|-----------|--------|---------| +| `contains_keywords` | `keywords, min_recall=0.0` | `keyword_recall: float`, `all_keywords_present: bool` | +| `contains_expected` | `case_sensitive=False` | `bool` | +| `does_not_contain` | `forbidden` | `no_forbidden_words: bool` | +| `not_empty` | — | `bool` | +| `max_length` | `max_chars=500` | `conciseness: float`, `within_limit: bool` | +| `min_length` | `min_chars=1` | `bool` | +| `matches_regex` | `pattern` | `bool` | +| `json_valid` | `required_keys=[]` | `valid_json: bool`, `has_required_keys: bool` | +| `word_overlap` | — | `overlap: float` (tracking-only) | + +## Fixtures + +Evals use the same fixture system as tests. Expensive setup (database, pipeline, graph) runs once and is shared across all cases. 
+ +```python +@fixture() +async def pipeline(): + driver = await build_pipeline() # 3 minutes, once + yield driver + await driver.close() + +session.bind(pipeline) + +@session.eval(evaluators=[my_scorer]) +async def pipeline_eval( + case: Annotated[EvalCase, From(cases)], + driver: Annotated[AsyncDriver, Use(pipeline)], +) -> QueryResult: + return await query(driver, case.inputs) +``` + +## ModelInfo + +`ModelInfo` is a **label for history tracking** — it does not configure or route to any model. It records which model produced the results so you can compare runs. + +```python +session = EvalSession(model=ModelInfo(name="qwen-2.5")) +``` + +## Judge + +A `Judge` is a protocol for LLM-as-judge evaluators. ProTest owns the interface — you plug in your LLM library. + +### The Protocol + +```python +class Judge(Protocol): + async def judge(self, prompt: str, output_type: type[T]) -> JudgeResponse[T]: ... +``` + +Minimal contract: takes a prompt and a return type, returns a `JudgeResponse` wrapping the typed result with optional usage stats. All configuration (model, temperature, system prompt, max_tokens) lives in your implementation's constructor, not in the protocol. 
+ +### Writing a Judge + +The `judge()` method returns a `JudgeResponse[T]` that wraps the output with optional usage stats: + +```python +from pydantic_ai import Agent +from protest.evals import JudgeResponse + +class PydanticAIJudge: + name = "gpt-4o-mini" # used in history + provider = "openai" # optional, used in history + + def __init__(self, model: str = "gpt-4o-mini", temperature: float = 0): + self.model = model + self.temperature = temperature + + async def judge(self, prompt: str, output_type: type[T]) -> JudgeResponse[T]: + agent = Agent(self.model, output_type=output_type) + result = await agent.run(prompt) + usage = result.usage() + return JudgeResponse( + output=result.output, + input_tokens=usage.request_tokens, + output_tokens=usage.response_tokens, + cost=usage.request_tokens * 0.15/1e6 + usage.response_tokens * 0.60/1e6, + ) +``` + +Tokens and cost are optional — omit them if your provider doesn't expose usage data: + +```python +return JudgeResponse(output=result.output) # tokens/cost = None, that's fine +``` + +### Configuring the Judge + +```python +session = EvalSession( + model=ModelInfo(name="qwen-2.5"), + judge=PydanticAIJudge(model="gpt-4o-mini", temperature=0), +) +``` + +`JudgeInfo` (name, provider) is derived automatically from the instance for history tracking. + +### Using the Judge in Evaluators + +Evaluators access the judge via `ctx.judge()`: + +```python +@dataclass +class JudgeResult: + accurate: Annotated[bool, Verdict] + reason: Annotated[str, Reason] = "" + +@evaluator +async def llm_rubric(ctx: EvalContext, rubric: str = "") -> JudgeResult: + return await ctx.judge( + f"Evaluate this response.\n\nResponse: {ctx.output}\nCriteria: {rubric}", + JudgeResult, # structured output — no text parsing + ) +``` + +For simple verdicts, use `bool` or `str` as `output_type`: + +```python +@evaluator +async def simple_judge(ctx: EvalContext) -> bool: + return await ctx.judge(f"Is this a valid answer? 
{ctx.output}", bool) +``` + +### No Judge Configured + +If an evaluator calls `ctx.judge()` and no judge was passed to `EvalSession`, a `RuntimeError` is raised. This is treated as an **infrastructure error** (not a test failure), same as a fixture crash. + +### Usage Tracking + +Each call to `ctx.judge()` is counted. Tokens and cost from `JudgeResponse` are accumulated per case and flow to `EvalPayload`: + +| Field | Description | +|-------|-------------| +| `judge_call_count` | Number of judge calls | +| `judge_input_tokens` | Total input tokens | +| `judge_output_tokens` | Total output tokens | +| `judge_cost` | Total cost (user-computed) | + +These are available in history, letting you track LLM usage across runs. + +## TaskResult (SUT Usage Tracking) + +If your eval task calls an LLM, you can report usage by returning `TaskResult` instead of a plain value: + +```python +from protest.evals import TaskResult + +@session.eval(evaluators=[my_scorer]) +async def chatbot(case: Annotated[EvalCase, From(cases)]) -> TaskResult[str]: + result = await agent.run(case.inputs) + usage = result.usage() + return TaskResult( + output=result.output, + input_tokens=usage.request_tokens, + output_tokens=usage.response_tokens, + cost=usage.request_tokens * 0.10/1e6 + usage.response_tokens * 0.30/1e6, + ) +``` + +This is **opt-in** — returning a plain `str` still works. ProTest unwraps `TaskResult` transparently: evaluators see the plain output, usage stats flow to the reporter and history. + +## Usage Display + +When task or judge usage data is available, ProTest shows a summary after the eval stats: + +``` + Passed: 16/26 (61.5%) + Task: 45.2k in / 27.1k out, $0.0142 + Judge: 5 calls, 800 in / 400 out, $0.0030 +``` + +Lines only appear when there is data. No `TaskResult` = no Task line. No judge configured = no Judge line. + +## Evaluator Errors + +If an evaluator raises an exception (e.g. LLM judge timeout), the case is marked as **error** (not fail). 
The stack trace appears in the output. + +> **Tip:** For non-deterministic evaluators (LLM judges), catch exceptions in the evaluator and return a verdict indicating failure rather than letting them propagate. + +## Name Collisions + +If two evaluators return dataclasses with the same field name (e.g. both have `accuracy`), the runner prefixes with the evaluator name when it detects a conflict: `llm_judge.accuracy`, `fact_check.accuracy`. + +## Multi-Model Sessions + +Track which model produced each eval suite's results: + +```python +pipeline_model = ModelInfo(name="qwen-2.5") +chat_model = ModelInfo(name="mistral-7b") + +session = EvalSession(model=pipeline_model) + +@session.eval(evaluators=[...], name="pipeline", model=pipeline_model) +async def pipeline_eval(case, driver) -> str: ... + +@session.eval(evaluators=[...], name="chatbot", model=chat_model) +async def chatbot_eval(case, deps) -> str: ... +``` + +`protest history --runs` shows the model per suite: + +``` +#1 2026-03-28T09:14 57/81 (70%) cb6f7bc + pipeline 29/39 (74%) qwen-2.5 + chatbot 10/21 (48%) mistral-7b +``` + +## CLI + +```bash +# Run evals +protest eval evals.session:session + +# Parallelism +protest eval evals.session:session -n 4 + +# Filter by tag +protest eval evals.session:session --tag chatbot + +# Filter by name +protest eval evals.session:session -k "lookup" + +# Re-run failures only +protest eval evals.session:session --last-failed + +# Verbosity: scores inline +protest eval evals.session:session -v + +# Show eval inputs/output/expected on passing cases +protest eval evals.session:session --show-output + +# Show captured log records +protest eval evals.session:session --show-logs +protest eval evals.session:session --show-logs=DEBUG +``` + +Flags are independent and combinable: `-v --show-output --show-logs`. + +> **Note:** Failed eval cases always show inputs/output/expected — no flag needed. 
+ +## Output + +### Default + +``` + ✓ chatbot[lookup] (1.2s) keyword_recall=1.00 all_keywords_present=✓ + ✗ chatbot[math]: all_keywords_present=False + │ inputs: What is 2+2? + │ output: The answer is 4. + │ expected: 4 + │ detail: found 0/1 + + Eval: chatbot (2 cases) +┏━━━━━━━━━━━━━━━━━┳━━━━━━┳━━━━━━┳━━━━━━┳━━━━━━┓ +┃ Score ┃ mean ┃ p50 ┃ p5 ┃ p95 ┃ +┡━━━━━━━━━━━━━━━━━╇━━━━━━╇━━━━━━╇━━━━━━╇━━━━━━┩ +│ keyword_recall │ 0.50 │ 0.50 │ 0.00 │ 1.00 │ +└─────────────────┴──────┴──────┴──────┴──────┘ + Passed: 1/2 (50.0%) + Results: .protest/results/chatbot_20260329_091422 +``` + +### Per-Case Results + +Each eval case writes a markdown file to `.protest/results/_/`: + +``` +.protest/results/chatbot_20260329_091422/ +├── lookup.md +├── causal.md +└── negative.md +``` + +## History + +Eval results are persisted as JSONL in `.protest/history.jsonl`. Track trends across runs. + +```bash +# Run list with per-suite breakdown +protest history --evals --runs + +# Detailed view of latest run +protest history --evals --show + +# Compare last two runs (fixed/regressed/new) +protest history --evals --compare +``` + +### Integrity Hashes + +Each case in history carries two hashes: + +- **`case_hash`** — hash of inputs + expected output. Changes when the test data changes. +- **`eval_hash`** — hash of evaluators. Changes when the scoring criteria change. + +`protest history --compare` uses these hashes to detect modified cases vs regressions. If a case's `eval_hash` changed between runs, it's reported as "scoring modified" rather than a real regression. + +## Progress Output + +For long-running fixtures, use `console.print` to show progress without polluting test capture: + +```python +from protest import console + +@fixture() +async def pipeline(): + for i, scene in enumerate(scenes): + console.print(f"[cyan]pipeline:[/] importing {scene.name} ({i+1}/{len(scenes)})") + await import_scene(scene) + return driver +``` + +Messages appear inline in the reporter output. 
Rich markup is supported (stripped for ASCII). diff --git a/examples/yorkshire/app/chatbot.py b/examples/yorkshire/app/chatbot.py new file mode 100644 index 0000000..82ca519 --- /dev/null +++ b/examples/yorkshire/app/chatbot.py @@ -0,0 +1,93 @@ +"""Yorkshire Terrier Expert Chatbot — fake LLM for eval demos. + +Simulates a RAG chatbot with realistic imperfections: +- Sometimes misses keywords (simulates retrieval failures) +- Occasionally adds irrelevant info (simulates hallucination) +- Response quality varies (simulates LLM non-determinism) +""" + +from __future__ import annotations + +import random + +# Knowledge base — what a real RAG system would retrieve +YORKSHIRE_FACTS = { + "size": "Yorkshire Terriers typically weigh between 2-3 kg. They come in teacup, mini, and standard sizes.", + "grooming": "Yorkies with long coats need daily brushing. Seniors over 6 years need extra grooming care. Regular baths every 2-3 weeks.", + "temperament": "Yorkies are bold, confident, and affectionate. Despite their small size, they are courageous and sometimes stubborn.", + "health": "Common health issues include dental problems, patellar luxation, and tracheal collapse. Regular vet checkups recommended.", + "training": "Yorkies are intelligent but can be stubborn. Positive reinforcement works best. Start training early for best results.", + "diet": "Small breed formula recommended. Feed 2-3 small meals per day. Avoid chocolate, grapes, and onions.", + "exercise": "30 minutes of daily exercise is sufficient. Short walks and indoor play. Avoid extreme temperatures.", + "jobs": "Historically bred as ratters. Modern Yorkies excel as therapy dogs, influencers, and loyal companions.", + "puppies": "Yorkshire puppies need extra care until 12 months. Socialization is critical in the first 6 months.", + "seniors": "Senior Yorkies (8+ years) may slow down. Adjust exercise and diet. 
More frequent vet visits recommended.", +} + + +def yorkshire_chatbot(question: str) -> str: # noqa: PLR0912 + """Fake chatbot that answers questions about Yorkshire Terriers. + + Simulates a RAG pipeline: keyword matching → fact retrieval → response generation. + No LLM calls — pure string matching for deterministic eval testing. + """ + question_lower = question.lower() + + # Find relevant facts by keyword matching + relevant_facts: list[str] = [] + for topic, fact in YORKSHIRE_FACTS.items(): + if topic in question_lower or any( + word in question_lower for word in topic.split() + ): + relevant_facts.append(fact) + + # Check for specific question patterns + if "weight" in question_lower or "how heavy" in question_lower: + relevant_facts.append(YORKSHIRE_FACTS["size"]) + if "brush" in question_lower or "coat" in question_lower: + relevant_facts.append(YORKSHIRE_FACTS["grooming"]) + if "eat" in question_lower or "food" in question_lower or "feed" in question_lower: + relevant_facts.append(YORKSHIRE_FACTS["diet"]) + if "walk" in question_lower or "active" in question_lower: + relevant_facts.append(YORKSHIRE_FACTS["exercise"]) + if "old" in question_lower or "aging" in question_lower: + relevant_facts.append(YORKSHIRE_FACTS["seniors"]) + if ( + "puppy" in question_lower + or "baby" in question_lower + or "young" in question_lower + ): + relevant_facts.append(YORKSHIRE_FACTS["puppies"]) + + # Deduplicate while preserving order + seen: set[str] = set() + unique_facts = [] + for fact in relevant_facts: + if fact not in seen: + seen.add(fact) + unique_facts.append(fact) + + if not unique_facts: + return "I'm not sure about that. I specialize in Yorkshire Terrier care and health." + + response = " ".join(unique_facts) + + # Simulate LLM imperfections + # ~20% chance: drop a sentence (simulates retrieval miss) + if random.random() < 0.2 and ". " in response: # noqa: S311, PLR2004 + sentences = response.split(". 
") + drop_idx = random.randint(0, len(sentences) - 1) # noqa: S311 + sentences.pop(drop_idx) + response = ". ".join(sentences) + + # ~10% chance: add irrelevant filler (simulates rambling) + if random.random() < 0.1: # noqa: S311, PLR2004 + response += " By the way, Yorkshire Terriers were originally bred in Yorkshire, England during the 19th century." + + # ~5% chance: return a vague non-answer (simulates confusion) + if random.random() < 0.05: # noqa: S311, PLR2004 + response = ( + "That's a great question about Yorkies! There are many factors to consider." + ) + + return response diff --git a/examples/yorkshire/evals/__init__.py b/examples/yorkshire/evals/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/examples/yorkshire/evals/dataset.py b/examples/yorkshire/evals/dataset.py new file mode 100644 index 0000000..7153ab6 --- /dev/null +++ b/examples/yorkshire/evals/dataset.py @@ -0,0 +1,122 @@ +"""Dataset for the Yorkshire chatbot evals.""" + +from __future__ import annotations + +from protest import ForEach +from protest.evals.evaluators import ( + contains_keywords, + does_not_contain, + max_length, + not_empty, +) + +yorkshire_cases = ForEach( + [ + # --- Factual recall --- + { + "name": "weight_question", + "inputs": "How much does a Yorkshire Terrier weigh?", + "expected": "2-3 kg", + "metadata": {"tags": ["factual", "size"]}, + "evaluators": [ + contains_keywords(keywords=["2-3 kg", "teacup", "mini", "standard"]) + ], + }, + { + "name": "grooming_basics", + "inputs": "How often should I brush my Yorkie?", + "expected": "daily brushing for long coats", + "metadata": {"tags": ["factual", "grooming"]}, + "evaluators": [contains_keywords(keywords=["daily", "brushing", "long"])], + }, + { + "name": "diet_advice", + "inputs": "What should I feed my Yorkshire Terrier?", + "expected": "small breed formula, 2-3 meals", + "metadata": {"tags": ["factual", "diet"]}, + "evaluators": [ + contains_keywords(keywords=["small breed", "meals", "avoid"]) + 
], + }, + { + "name": "exercise_needs", + "inputs": "How much exercise does a Yorkie need?", + "expected": "30 minutes daily", + "metadata": {"tags": ["factual", "exercise"]}, + "evaluators": [contains_keywords(keywords=["30 minutes", "walk"])], + }, + # --- Temperament --- + { + "name": "personality", + "inputs": "What is the temperament of a Yorkshire Terrier?", + "expected": "bold, confident, affectionate", + "metadata": {"tags": ["factual", "temperament"]}, + "evaluators": [ + contains_keywords(keywords=["bold", "confident", "affectionate"]) + ], + }, + # --- Age-specific --- + { + "name": "puppy_care", + "inputs": "How do I care for a Yorkshire puppy?", + "expected": "extra care, socialization", + "metadata": {"tags": ["factual", "puppies"]}, + "evaluators": [contains_keywords(keywords=["12 months", "socialization"])], + }, + { + "name": "senior_care", + "inputs": "My Yorkie is getting old, what should I change?", + "expected": "adjust exercise, more vet visits", + "metadata": {"tags": ["factual", "seniors"]}, + "evaluators": [contains_keywords(keywords=["senior", "exercise", "vet"])], + }, + # --- Hallucination checks --- + { + "name": "no_cat_advice", + "inputs": "Tell me about Yorkshire Terrier health", + "expected": "dental problems, patellar luxation", + "metadata": {"tags": ["safety"]}, + "evaluators": [ + does_not_contain(forbidden=["cat", "feline", "persian"]), + contains_keywords(keywords=["dental", "health"]), + ], + }, + { + "name": "no_made_up_breeds", + "inputs": "What jobs can a Yorkie do?", + "expected": "therapy dogs, companions", + "metadata": {"tags": ["safety"]}, + "evaluators": [ + does_not_contain(forbidden=["labrador", "golden retriever", "poodle"]), + contains_keywords(keywords=["therapy", "companion"]), + ], + }, + # --- Edge cases --- + { + "name": "unknown_topic", + "inputs": "What is the GDP of France?", + "expected": "I'm not sure", + "metadata": {"tags": ["edge_case"]}, + "evaluators": [contains_keywords(keywords=["not sure", 
"specialize"])], + }, + { + "name": "empty_question", + "inputs": "", + "expected": "I'm not sure", + "metadata": {"tags": ["edge_case"]}, + "evaluators": [contains_keywords(keywords=["not sure"])], + }, + # --- Known weak spot (chatbot doesn't know about training treats) --- + { + "name": "training_treats", + "inputs": "What treats are best for training a Yorkie?", + "expected": "small soft treats, positive reinforcement", + "metadata": {"tags": ["factual", "training"]}, + "evaluators": [ + contains_keywords(keywords=["treats", "small", "soft", "reward"]) + ], + }, + ] +) + +suite_evaluators = [not_empty, max_length(max_chars=500)] diff --git a/examples/yorkshire/evals/evaluators.py b/examples/yorkshire/evals/evaluators.py new file mode 100644 index 0000000..1008c22 --- /dev/null +++ b/examples/yorkshire/evals/evaluators.py @@ -0,0 +1,65 @@ +"""Yorkshire-specific evaluators. + +Generic evaluators come from protest.evals.evaluators. +Only project-specific ones live here. + +These also demonstrate how EvalContext generics document +what an evaluator expects as input/output types. +""" + +from __future__ import annotations + +from dataclasses import dataclass +from typing import Annotated, Any + +from protest.evals import EvalContext, Metric, Verdict, evaluator + +# --- Text evaluator: EvalContext[Any, str] --------------------------------- +# Most evaluators work on text output. The first type param (inputs) is Any +# because evaluators don't usually care about the input shape. 
+ + +@dataclass(frozen=True, slots=True) +class MentionsBreedResult: + breed_mentioned: Annotated[bool, Verdict] + + +@evaluator +def mentions_breed( + ctx: EvalContext[Any, str], breed: str = "Yorkshire" +) -> MentionsBreedResult: + """Check that the output mentions a specific breed.""" + return MentionsBreedResult(breed_mentioned=breed.lower() in ctx.output.lower()) + + +# --- Numeric evaluator: EvalContext[str, float] ---------------------------- +# An evaluator for a task that returns a numeric score (e.g. a classifier +# confidence, a similarity metric). The output is a float, not a string. + + +@dataclass(frozen=True, slots=True) +class ConfidenceResult: + confidence: Annotated[float, Metric] + above_threshold: Annotated[bool, Verdict] + + +@evaluator +def confidence_above( + ctx: EvalContext[str, float], threshold: float = 0.8 +) -> ConfidenceResult: + """Check that a numeric output (e.g. classifier confidence) meets a threshold.""" + return ConfidenceResult( + confidence=ctx.output, + above_threshold=ctx.output >= threshold, + ) + + +# --- Binary evaluator: EvalContext[str, bytes] ----------------------------- +# An evaluator for a task that returns raw bytes (e.g. image generation, +# audio synthesis). The evaluator checks basic properties of the output. + + +@evaluator +def output_not_empty_bytes(ctx: EvalContext[str, bytes]) -> bool: + """Check that a binary output (e.g. generated image) is not empty.""" + return len(ctx.output) > 0 diff --git a/examples/yorkshire/evals/session.py b/examples/yorkshire/evals/session.py new file mode 100644 index 0000000..7779f66 --- /dev/null +++ b/examples/yorkshire/evals/session.py @@ -0,0 +1,29 @@ +"""Yorkshire Chatbot Evals — evaluate the fake Yorkshire expert chatbot. 
+ +Run with: + protest eval examples.yorkshire.evals.session:session + protest eval examples.yorkshire.evals.session:session -n 4 + protest eval examples.yorkshire.evals.session:session --tag safety + protest eval examples.yorkshire.evals.session:session --last-failed + protest history --evals --show +""" + +from typing import Annotated + +from examples.yorkshire.app.chatbot import yorkshire_chatbot +from examples.yorkshire.evals.dataset import ( + suite_evaluators, + yorkshire_cases, +) +from protest import From +from protest.evals import EvalSession, ModelInfo + +session = EvalSession( + model=ModelInfo(name="yorkshire-chatbot-v1", provider="local"), + metadata={"version": "1.0", "type": "keyword-matching"}, +) + + +@session.eval(evaluators=suite_evaluators) +def yorkshire_eval(case: Annotated[dict, From(yorkshire_cases)]) -> str: + return yorkshire_chatbot(case["inputs"]) diff --git a/examples/yorkshire/session.py b/examples/yorkshire/session.py new file mode 100644 index 0000000..7b8c3c3 --- /dev/null +++ b/examples/yorkshire/session.py @@ -0,0 +1,52 @@ +"""Yorkshire Terrier Unified Session — tests + evals in one session. 
+
+Run all (tests + evals):
+    protest run examples.yorkshire.session:session
+
+Run only tests:
+    protest run examples.yorkshire.session:session
+    (protest run filters to kind=test by default)
+
+Run only evals:
+    protest eval examples.yorkshire.session:session
+"""
+
+from examples.yorkshire.app.chatbot import yorkshire_chatbot
+from examples.yorkshire.evals.dataset import yorkshire_cases
+from examples.yorkshire.tests.fixtures import (
+    configure_kennel_logging,
+    kennel,
+    yorkshire,
+)
+from examples.yorkshire.tests.plugins import BarkPlugin
+from examples.yorkshire.tests.suites.adults import adults_suite
+from examples.yorkshire.tests.suites.custom_factory import custom_factory_suite
+from examples.yorkshire.tests.suites.legacy.suite import legacy_suite
+from examples.yorkshire.tests.suites.puppies.suite import puppies_suite
+from examples.yorkshire.tests.suites.rate_limited import rate_limited_suite
+from examples.yorkshire.tests.suites.seniors.suite import seniors_suite
+from examples.yorkshire.tests.suites.showcase.suite import showcase_suite
+from protest import ProTestSession
+from protest.evals import ModelInfo
+
+session = ProTestSession(concurrency=4, history=True)
+session.use(BarkPlugin)
+session.bind(configure_kennel_logging, autouse=True)
+session.bind(kennel)
+session.bind(yorkshire)
+
+# Tests
+session.add_suite(puppies_suite)
+session.add_suite(adults_suite)
+session.add_suite(seniors_suite)
+session.add_suite(legacy_suite)
+session.add_suite(showcase_suite)
+session.add_suite(rate_limited_suite)
+session.add_suite(custom_factory_suite)
+
+# Evals
+session.configure_evals(model=ModelInfo(name="yorkshire-chatbot-v1", provider="local"))
+session.register_dataset(
+    yorkshire_cases,
+    task=yorkshire_chatbot,
+)
diff --git a/justfile b/justfile
index ddce526..9ddfe7b 100644
--- a/justfile
+++ b/justfile
@@ -7,7 +7,7 @@
 @lint:
     ruff format .
     ruff check --fix .
-    mypy --strict protest
+    uv run mypy protest
 
 @fullcheck:
     ruff format --check . && ruff check . 
# lint diff --git a/mkdocs.yml b/mkdocs.yml index 93864db..a643afe 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -65,6 +65,8 @@ nav: - Tags: core-concepts/tags.md - Dependency Injection: core-concepts/dependency-injection.md - Reporters: core-concepts/reporters.md + - Console Output: core-concepts/console.md + - Evals: evals.md - Guides: - Best Practices: best-practices.md - Project Organization: guides/project-organization.md diff --git a/protest/__init__.py b/protest/__init__.py index 61ab9e6..97221b9 100644 --- a/protest/__init__.py +++ b/protest/__init__.py @@ -1,3 +1,4 @@ +from protest import console from protest.api import collect_tests, list_tags, run_session from protest.assertions import ExceptionInfo, RaisesContext, raises, warns from protest.core.session import ProTestSession @@ -41,6 +42,7 @@ "__version__", "caplog", "collect_tests", + "console", "factory", "fixture", "list_tags", diff --git a/protest/api.py b/protest/api.py index a6c6f79..ce8c178 100644 --- a/protest/api.py +++ b/protest/api.py @@ -14,21 +14,27 @@ def test_example(): assert True success = run_session(session) - -Note: - This module uses lazy imports (PLC0415) to optimize startup time. - Users importing `from protest.api import run_session` shouldn't pay - the cost of loading the entire framework until they actually call it. 
""" from __future__ import annotations +import asyncio from typing import TYPE_CHECKING +from protest.core.collector import Collector +from protest.core.runner import TestRunner +from protest.core.suite import ( + ProTestSuite, # noqa: TC001 — used at runtime in list_tags +) +from protest.events.types import Event +from protest.filters.keyword import KeywordFilterPlugin +from protest.filters.suite import SuiteFilterPlugin +from protest.plugin import PluginBase, PluginContext +from protest.tags.plugin import TagFilterPlugin + if TYPE_CHECKING: from protest.core.session import ProTestSession from protest.entities import RunResult, TestItem - from protest.plugin import PluginContext def run_session( # noqa: PLR0913 - public API with many optional params @@ -69,10 +75,6 @@ def run_session( # noqa: PLR0913 - public API with many optional params Returns: RunResult with success status and interrupted flag. """ - from protest.core.runner import ( # noqa: PLC0415 - lazy import for startup perf - TestRunner, - ) - # Apply session-level settings from ctx or params if ctx is not None: if ctx.get("concurrency") is not None: @@ -91,10 +93,6 @@ def run_session( # noqa: PLR0913 - public API with many optional params # Build context from parameters if not provided if ctx is None: - from protest.plugin import ( # noqa: PLC0415 - lazy import for startup perf - PluginContext, - ) - ctx = PluginContext( args={ "last_failed": last_failed, @@ -136,16 +134,6 @@ def collect_tests( # noqa: PLR0913 - public API with many optional params Returns: List of collected TestItem objects. 
""" - # Lazy imports for startup performance - only load when function is called - import asyncio # noqa: PLC0415 - - from protest.core.collector import Collector # noqa: PLC0415 - from protest.events.types import Event # noqa: PLC0415 - from protest.filters.keyword import KeywordFilterPlugin # noqa: PLC0415 - from protest.filters.suite import SuiteFilterPlugin # noqa: PLC0415 - from protest.plugin import PluginBase, PluginContext # noqa: PLC0415 - from protest.tags.plugin import TagFilterPlugin # noqa: PLC0415 - # Build context from parameters if not provided if ctx is None: ctx = PluginContext( @@ -182,10 +170,6 @@ def list_tags(session: ProTestSession) -> set[str]: Returns: Set of all tag names declared on fixtures, suites, and tests. """ - from protest.core.suite import ( # noqa: PLC0415, TC001 - lazy import for startup perf - ProTestSuite, - ) - all_tags: set[str] = set() for fixture_reg in session.fixtures: diff --git a/protest/cli/history.py b/protest/cli/history.py new file mode 100644 index 0000000..e83216d --- /dev/null +++ b/protest/cli/history.py @@ -0,0 +1,528 @@ +"""CLI command: protest history — browse run history.""" + +from __future__ import annotations + +import argparse +import sys +from pathlib import Path +from typing import Any + +from protest.history.storage import clean_dirty, load_history + + +def handle_history_command(argv: list[str]) -> None: + """Entry point for `protest history`.""" + parser = argparse.ArgumentParser( + prog="protest history", description="Browse run history" + ) + parser.add_argument( + "--tail", "-n", type=int, default=10, help="Number of entries (default: 10)" + ) + parser.add_argument("--model", type=str, default=None, help="Filter by model name") + parser.add_argument("--suite", type=str, default=None, help="Filter by suite name") + parser.add_argument("--runs", action="store_true", help="Show run-by-run list") + parser.add_argument( + "--show", + nargs="?", + const=0, + type=int, + default=None, + metavar="N", + 
help="Detailed panel for Nth most recent run (0=latest)", + ) + parser.add_argument( + "--compare", action="store_true", help="Compare 2 most recent runs" + ) + parser.add_argument("--evals", action="store_true", help="Eval runs only") + parser.add_argument("--tests", action="store_true", help="Test runs only") + parser.add_argument( + "--clean-dirty", + action="store_true", + help="Remove runs with uncommitted changes on current commit.", + ) + parser.add_argument( + "--path", type=str, default=None, help="History directory (default: .protest/)" + ) + + args = parser.parse_args(argv) + history_dir = Path(args.path) if args.path else None + + if args.clean_dirty: + removed = clean_dirty(history_dir=history_dir) + print( + f"Removed {removed} dirty entries." + if removed + else "No dirty entries to clean." + ) + sys.exit(0) + + entries = load_history( + history_dir=history_dir, + model=args.model, + suite=args.suite, + evals_only=args.evals, + tests_only=args.tests, + ) + if not entries: + print("No history found.") + sys.exit(0) + + out = _get_output() + if args.compare: + if len(entries) < 2: + print("Need at least 2 runs to compare.") + sys.exit(1) + out.compare(entries[-1], entries[-2]) + elif args.show is not None: + idx = args.show + if idx >= len(entries): + print(f"Only {len(entries)} entries available.") + sys.exit(1) + out.detail(entries[-(idx + 1)]) + elif args.runs: + out.runs(entries[-args.tail :]) + else: + out.stats(entries) + + +# --------------------------------------------------------------------------- +# Output abstraction — Rich if available, plain text fallback +# --------------------------------------------------------------------------- + + +class _Output: + """Base output — plain text.""" + + def stats(self, entries: list[dict[str, Any]]) -> None: + suites = _aggregate_suites(entries) + if not suites: + print("No suite data found.") + return + print(f"\n {'Suite':<22} {'Kind':<6} {'Runs':>4} {'Pass rate':<16} {'Flaky'}") + for name in 
sorted(suites): + s = suites[name] + rate_str = _format_rate(s["pass_rates"]) + flaky_n = len(s["flaky"]) + print( + f" {name:<22} {s['kind']:<6} {s['n_runs']:>4} {rate_str:<16} {flaky_n or ''}" + ) + print() + + def runs(self, entries: list[dict[str, Any]]) -> None: + for i, e in enumerate(entries): + p, t, r = _entry_stats(e) + git = (e.get("git") or {}).get("commit_short", "?") + ts = e.get("timestamp", "?")[:16] + print(f"\n #{len(entries) - i:<3} {ts} {p}/{t} ({r * 100:.0f}%) {git}") + for sn, sd in e.get("suites", {}).items(): + if not isinstance(sd, dict): + continue + sp = sd.get("passed", 0) + st = sd.get("total_cases", 0) + sr = sp / st * 100 if st else 0 + model = sd.get("model") or "-" + print(f" {sn:<20} {sp}/{st} ({sr:.0f}%) {model}") + print() + + def detail(self, entry: dict[str, Any]) -> None: + kind = "EVAL" if entry.get("evals") else "TEST" + git = entry.get("git") or {} + ts = entry.get("timestamp", "?")[:19] + print( + f"\n {kind} run {ts} {git.get('commit_short', '?')} @ {git.get('branch', '?')}" + ) + for sn, sd in entry.get("suites", {}).items(): + if not isinstance(sd, dict): + continue + suite_model = sd.get("model") + model_str = f" [{suite_model}]" if suite_model else "" + print( + f"\n Suite: {sn} {sd.get('passed', 0)}/{sd.get('total_cases', 0)}{model_str}" + ) + for cn, cd in sd.get("cases", {}).items(): + if not isinstance(cd, dict): + continue + m = "+" if cd.get("passed") else "-" + print(f" {m} {cn} ({_fmt_dur(cd.get('duration', 0))})") + print() + + def compare(self, current: dict[str, Any], previous: dict[str, Any]) -> None: + cm = _get_display_model(current) + pm = _get_display_model(previous) + _, _, cr = _entry_stats(current) + _, _, pr = _entry_stats(previous) + if cm == pm: + print(f"\n Model: {cm}") + else: + print(f"\n Model: {pm} → {cm}") + print(f" Pass rate: {pr * 100:.0f}% → {cr * 100:.0f}%") + changes = _classify_changes(_all_cases(current), _all_cases(previous)) + _print_changes(changes) + + +class 
_RichOutput(_Output): + """Rich output with colors, tables, panels.""" + + def __init__(self) -> None: + from rich.console import Console # noqa: PLC0415 — optional dep + + self.console = Console(highlight=False) + + def stats(self, entries: list[dict[str, Any]]) -> None: + from rich.table import Table # noqa: PLC0415 — optional dep + + suites = _aggregate_suites(entries) + if not suites: + self.console.print("No suite data found.") + return + table = Table(show_header=True, header_style="bold", box=None, pad_edge=False) + table.add_column("Suite", min_width=12, no_wrap=True) + table.add_column("Kind", width=5) + table.add_column("Runs", justify="right", width=4) + table.add_column("Pass rate", min_width=14, no_wrap=True) + table.add_column("Scores", no_wrap=True) + table.add_column("Flaky", width=5) + + for name in sorted(suites): + s = suites[name] + kind = s["kind"] + kind_color = "cyan" if kind == "eval" else "blue" + rate_str = _rich_rate(s["pass_rates"]) + score_arrows = _rich_score_arrows(s.get("score_values", {})) + flaky_n = len(s["flaky"]) + flaky_str = f"[yellow]{flaky_n}[/]" if flaky_n else "" + table.add_row( + name, + f"[{kind_color}]{kind}[/]", + str(s["n_runs"]), + rate_str, + score_arrows, + flaky_str, + ) + + self.console.print() + self.console.print(table) + self.console.print() + + def runs(self, entries: list[dict[str, Any]]) -> None: + self.console.print() + for i, e in enumerate(entries): + p, t, r = _entry_stats(e) + git = (e.get("git") or {}).get("commit_short", "?") + ts = e.get("timestamp", "?")[:16] + rate_color = "green" if r >= 0.8 else "yellow" if r >= 0.5 else "red" + self.console.print( + f" [dim]#{len(entries) - i:<3}[/] {ts} " + f"[{rate_color}]{p}/{t} ({r * 100:.0f}%)[/] [dim]{git}[/]" + ) + for sn, sd in e.get("suites", {}).items(): + if not isinstance(sd, dict): + continue + sp = sd.get("passed", 0) + st = sd.get("total_cases", 0) + sr = sp / st * 100 if st else 0 + sc = "green" if sr >= 80 else "yellow" if sr >= 50 else "red" 
+ model = sd.get("model") or "-" + self.console.print( + f" {sn:<20} [{sc}]{sp}/{st} ({sr:.0f}%)[/] [cyan]{model}[/]" + ) + self.console.print() + + def detail(self, entry: dict[str, Any]) -> None: + from rich.panel import Panel # noqa: PLC0415 — optional dep + from rich.text import Text # noqa: PLC0415 — optional dep + + kind = "EVAL" if entry.get("evals") else "TEST" + git = entry.get("git") or {} + ts = entry.get("timestamp", "?")[:19] + evals_info = entry.get("evals") or {} + + lines = Text() + lines.append(f"{kind} run", style="bold") + lines.append(f" {ts} ", style="dim") + lines.append( + f"{git.get('commit_short', '?')} @ {git.get('branch', '?')}\n", style="dim" + ) + + # Scores summary + for sn, stats in evals_info.get("scores_summary", {}).items(): + mean = stats.get("mean", 0) + color = "green" if mean >= 0.8 else "yellow" if mean >= 0.5 else "red" + lines.append(f" {sn}: ", style="dim") + lines.append(f"mean={mean:.2f}", style=color) + lines.append( + f" p50={stats.get('median', 0):.2f} p95={stats.get('p95', 0):.2f}\n", + style="dim", + ) + + for sn, sd in entry.get("suites", {}).items(): + if not isinstance(sd, dict): + continue + p, t = sd.get("passed", 0), sd.get("total_cases", 0) + lines.append("\nSuite: ", style="bold") + lines.append(sn) + pc = "green" if p == t else "yellow" if p >= t * 0.5 else "red" + lines.append(f" {p}/{t}", style=pc) + suite_model = sd.get("model") + if suite_model: + lines.append(f" [{suite_model}]", style="cyan") + lines.append(f" {_fmt_dur(sd.get('duration', 0))}\n", style="dim") + for cn, cd in sd.get("cases", {}).items(): + if not isinstance(cd, dict): + continue + if cd.get("passed"): + lines.append(" + ", style="green") + else: + lines.append(" - ", style="red") + lines.append(cn) + lines.append(f" ({_fmt_dur(cd.get('duration', 0))})\n", style="dim") + + self.console.print() + self.console.print( + Panel(lines, title="[bold]Run Detail[/]", border_style="cyan") + ) + + def compare(self, current: dict[str, Any], 
previous: dict[str, Any]) -> None: + from rich.panel import Panel # noqa: PLC0415 — optional dep + from rich.text import Text # noqa: PLC0415 — optional dep + + cm = _get_display_model(current) + pm = _get_display_model(previous) + _, _, cr = _entry_stats(current) + _, _, pr = _entry_stats(previous) + delta = cr - pr + + lines = Text() + if cm == pm: + lines.append(f"Model: {cm}\n", style="cyan") + else: + lines.append(f"Model: {pm} → {cm}\n", style="cyan") + + lines.append("Pass rate: ") + lines.append(f"{pr * 100:.0f}%", style="dim") + lines.append(" → ") + rc = "green" if delta > 0 else "red" if delta < 0 else "" + lines.append(f"{cr * 100:.0f}%", style=rc) + if abs(delta) >= 0.001: + lines.append(f" ({delta * 100:+.0f}%)", style=rc) + lines.append("\n\n") + + changes = _classify_changes(_all_cases(current), _all_cases(previous)) + labels = [ + ("fixed", "Fixed", "green", "+"), + ("regressed", "Regressions", "red", "-"), + ("modified", "Modified", "yellow", "⟳"), + ("new", "New", "cyan", "*"), + ] + has_any = False + for key, label, color, marker in labels: + items = changes[key] + if items: + has_any = True + lines.append(f"{label} ({len(items)}):\n", style=color) + for n in items: + lines.append(f" {marker} {n}\n") + lines.append("\n") + if not has_any: + lines.append("No changes.\n", style="dim") + + self.console.print() + self.console.print( + Panel(lines, title="[bold]Run Comparison[/]", border_style="cyan") + ) + + +def _get_output() -> _Output: + try: + return _RichOutput() + except ImportError: + return _Output() + + +# --------------------------------------------------------------------------- +# Rich helpers +# --------------------------------------------------------------------------- + + +def _rich_rate(rates: list[float]) -> str: + if len(rates) >= 2: + first, last = rates[0], rates[-1] + delta = last - first + if delta > 0.01: + return f"[dim]{first * 100:.0f}%[/] [green]↗ {last * 100:.0f}%[/]" + if delta < -0.01: + return f"[dim]{first * 
100:.0f}%[/] [red]↘ {last * 100:.0f}%[/]" + return f"{last * 100:.0f}%" + if rates: + return f"{rates[0] * 100:.0f}%" + return "-" + + +def _rich_score_arrows(score_values: dict[str, list[float]]) -> str: + """Score trend arrows: ↗↘→ per score.""" + parts: list[str] = [] + for _name, values in sorted(score_values.items()): + if len(values) >= 2: + d = values[-1] - values[0] + if d > 0.01: + parts.append("[green]↗[/]") + elif d < -0.01: + parts.append("[red]↘[/]") + else: + parts.append("[dim]→[/]") + return "".join(parts) + + +# --------------------------------------------------------------------------- +# Data helpers +# --------------------------------------------------------------------------- + + +def _format_rate(rates: list[float]) -> str: + if len(rates) >= 2: + first, last = rates[0], rates[-1] + delta = last - first + arrow = "↗" if delta > 0.01 else "↘" if delta < -0.01 else "→" + return f"{first * 100:.0f}% {arrow} {last * 100:.0f}%" + if rates: + return f"{rates[0] * 100:.0f}%" + return "-" + + +def _aggregate_suites(entries: list[dict[str, Any]]) -> dict[str, dict[str, Any]]: + suites: dict[str, dict[str, Any]] = {} + for entry in entries: + for name, data in entry.get("suites", {}).items(): + if not isinstance(data, dict): + continue + if name not in suites: + suites[name] = { + "kind": data.get("kind", "test"), + "n_runs": 0, + "pass_rates": [], + "flaky": {}, + "cases_seen": {}, + "score_values": {}, + } + s = suites[name] + errored = data.get("errored", 0) + total = data.get("total_cases", 0) + passed = data.get("passed", 0) + # Skip error-only runs (fixture crashes) from stats + if errored and errored >= total: + continue + s["n_runs"] += 1 + if total: + s["pass_rates"].append(passed / total) + _track_cases(s, data.get("cases", {})) + + for s in suites.values(): + s["flaky"] = { + cn: cs["fails"] + for cn, cs in s["cases_seen"].items() + if 0 < cs["fails"] < cs["runs"] + } + return suites + + +def _track_cases(suite: dict[str, Any], cases: 
dict[str, Any]) -> None: + """Track per-case pass/fail and scores for a suite.""" + for cn, cd in cases.items(): + if not isinstance(cd, dict): + continue + # Skip errored cases (fixture crashes) from stats + if cd.get("is_error"): + continue + if cn not in suite["cases_seen"]: + suite["cases_seen"][cn] = {"runs": 0, "fails": 0} + suite["cases_seen"][cn]["runs"] += 1 + if not cd.get("passed", True): + suite["cases_seen"][cn]["fails"] += 1 + for sn, sv in cd.get("scores", {}).items(): + if isinstance(sv, (int, float)): + if sn not in suite["score_values"]: + suite["score_values"][sn] = [] + suite["score_values"][sn].append(float(sv)) + + +def _get_display_model(entry: dict[str, Any]) -> str: + """Get display model: per-suite models if they differ, global otherwise.""" + suite_models: set[str] = { + sd["model"] + for sd in entry.get("suites", {}).values() + if isinstance(sd, dict) and sd.get("model") + } + if len(suite_models) > 1: + return ", ".join(sorted(suite_models)) + if suite_models: + return next(iter(suite_models)) + return (entry.get("evals") or {}).get("model") or "-" + + +def _entry_stats(entry: dict[str, Any]) -> tuple[int, int, float]: + total = passed = 0 + for data in entry.get("suites", {}).values(): + if isinstance(data, dict): + total += data.get("total_cases", 0) + passed += data.get("passed", 0) + return passed, total, passed / total if total else 0 + + +def _all_cases(entry: dict[str, Any]) -> dict[str, Any]: + cases: dict[str, Any] = {} + for data in entry.get("suites", {}).values(): + if isinstance(data, dict): + cases.update(data.get("cases", {})) + return cases + + +def _classify_changes( + curr_cases: dict[str, Any], + prev_cases: dict[str, Any], +) -> dict[str, list[str]]: + result: dict[str, list[str]] = { + "fixed": [], + "regressed": [], + "modified": [], + "new": [], + } + for name, curr in curr_cases.items(): + prev = prev_cases.get(name) + if prev is None: + result["new"].append(name) + elif curr.get("case_hash") and 
curr["case_hash"] != prev.get("case_hash"): + result["modified"].append(f"{name} (case modified)") + elif curr.get("eval_hash") and curr["eval_hash"] != prev.get("eval_hash"): + result["modified"].append(f"{name} (scoring modified)") + elif curr.get("passed") and not prev.get("passed"): + result["fixed"].append(name) + elif not curr.get("passed") and prev.get("passed"): + result["regressed"].append(name) + return result + + +def _print_changes(changes: dict[str, list[str]]) -> None: + labels = { + "fixed": ("Fixed", "+"), + "regressed": ("Regressions", "-"), + "modified": ("Modified", "⟳"), + "new": ("New", "*"), + } + has_any = False + for key, (label, marker) in labels.items(): + if changes[key]: + has_any = True + print(f"\n {label} ({len(changes[key])}):") + for n in changes[key]: + print(f" {marker} {n}") + if not has_any: + print(" No changes.") + print() + + +def _fmt_dur(seconds: float) -> str: + if seconds < 1: + return f"{seconds * 1000:.0f}ms" + if seconds < 60: + return f"{seconds:.1f}s" + return f"{int(seconds // 60)}m{seconds % 60:.0f}s" diff --git a/protest/cli/main.py b/protest/cli/main.py index a913e7f..648fd26 100644 --- a/protest/cli/main.py +++ b/protest/cli/main.py @@ -2,12 +2,16 @@ import argparse import sys -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Any + +from protest.api import collect_tests, list_tags, run_session +from protest.core.session import ProTestSession +from protest.loader import LoadError, load_session, parse_target +from protest.plugin import PluginContext +from protest.reporting.verbosity import Verbosity if TYPE_CHECKING: - from protest.core.session import ProTestSession from protest.entities import TestItem - from protest.plugin import PluginContext HELP_EPILOG = """ Examples: @@ -56,9 +60,6 @@ def _handle_tags_command() -> None: def _list_tags(target: str, app_dir: str, recursive: bool = False) -> None: """List all tags in a session.""" - from protest.api import collect_tests, list_tags - from 
protest.loader import LoadError, load_session - try: session = load_session(target, app_dir) except LoadError as exc: @@ -103,19 +104,21 @@ def main() -> None: _print_help() return - if command == "tags": - _handle_tags_command() - return - - if command == "run": - _handle_run_command() + commands: dict[str, Any] = { + "tags": _handle_tags_command, + "run": lambda: _handle_run_command(kind_filter="test"), + "eval": lambda: _handle_run_command(kind_filter="eval"), + "history": _handle_history_command, + "live": _handle_live_command, + } + + handler = commands.get(command) + if handler: + handler() return - if command == "live": - _handle_live_command() - return - - print(f"Error: Unknown command '{command}'. Use 'run', 'tags', or 'live'.") + valid = ", ".join(f"'{c}'" for c in commands) + print(f"Error: Unknown command '{command}'. Use {valid}.") sys.exit(1) @@ -134,7 +137,7 @@ def _handle_live_command() -> None: ) args = parser.parse_args(sys.argv[2:]) - from protest.reporting.web import run_live_server + from protest.reporting.web import run_live_server # noqa: PLC0415 — optional dep run_live_server(port=args.port) @@ -143,9 +146,11 @@ def _print_help() -> None: """Print main help.""" print("ProTest - Async-first Python test framework\n") print("Commands:") - print(" run Run tests") - print(" live Start live reporter server") - print(" tags Tag inspection commands") + print(" run Run tests") + print(" eval Run evaluations") + print(" history Browse run history") + print(" live Start live reporter server") + print(" tags Tag inspection commands") print(HELP_EPILOG) @@ -228,10 +233,17 @@ def _create_run_parser() -> argparse.ArgumentParser: return parser -def _handle_run_command() -> None: - """Handle 'protest run' subcommand with two-phase parsing.""" - from protest.loader import LoadError, load_session, parse_target +def _handle_history_command() -> None: + """Handle 'protest history' subcommand.""" + from protest.cli.history import ( # noqa: PLC0415 — heavy module 
+ handle_history_command, + ) + handle_history_command(sys.argv[2:]) + + +def _handle_run_command(kind_filter: str | None = None) -> None: + """Handle 'protest run' / 'protest eval' with two-phase parsing.""" argv = sys.argv[2:] # Phase 1: Parse base args to get target @@ -240,8 +252,6 @@ def _handle_run_command() -> None: # If --help without target, show full help with all plugin options if ("--help" in remaining or "-h" in remaining) and not base_args.target: - from protest.core.session import ProTestSession - full_parser = _create_run_parser() for plugin_class in ProTestSession.default_plugin_classes(): plugin_class.add_cli_options(full_parser) @@ -271,17 +281,15 @@ def _handle_run_command() -> None: args = full_parser.parse_args(argv) # Phase 5: Build context - from protest.plugin import PluginContext - from protest.reporting.verbosity import Verbosity - effective_verbosity = Verbosity.QUIET if args.quiet else args.verbosity - ctx = PluginContext( - args={ - **vars(args), - "target_suite": suite_filter, - "verbosity": effective_verbosity, - } - ) + ctx_args: dict[str, Any] = { + **vars(args), + "target_suite": suite_filter, + "verbosity": effective_verbosity, + } + if kind_filter: + ctx_args["kind_filter"] = kind_filter + ctx = PluginContext(args=ctx_args) # Phase 6: Run tests (api.run_session handles plugin activation) run_tests(session, ctx, collect_only=args.collect_only) @@ -292,8 +300,6 @@ def run_tests( ctx: PluginContext, collect_only: bool = False, ) -> None: - from protest.api import collect_tests, run_session - if collect_only: items = collect_tests(session, ctx=ctx) print(f"Collected {len(items)} test(s):\n") diff --git a/protest/console.py b/protest/console.py new file mode 100644 index 0000000..9959165 --- /dev/null +++ b/protest/console.py @@ -0,0 +1,68 @@ +"""protest.console — progress output that bypasses test capture. 
+ +Usage:: + + from protest import console + + @fixture() + async def pipeline(): + for i, scene in enumerate(scenes): + console.print(f"[bold]pipeline:[/] importing {scene.name} ({i+1}/{len(scenes)})") + await import_scene(scene) + + # Raw mode — no markup processing + console.print("debug: raw bytes here", raw=True) + +Messages go through the event bus → reporters display them inline. +If no event bus is available (outside a protest session), falls back to stderr. +""" + +from __future__ import annotations + +import contextlib +import re +import sys + +from protest.events.types import Event +from protest.execution.capture import get_event_bus + + +def print(msg: str, *, raw: bool = False) -> None: + """Print a message that bypasses test capture. + + Goes through the event bus so reporters display it at the right place. + Supports Rich markup (stripped for ASCII reporter). + + Args: + msg: The message to print. Supports Rich markup unless raw=True. + raw: If True, no markup processing — message passed as-is. + """ + bus = get_event_bus() + if bus is None: + _fallback_print(msg, raw) + return + + # Call handlers directly (sync, bypasses async emit). + # This ensures messages appear immediately, not after the test. + for handler_entry in bus._handlers.get(Event.USER_PRINT, []): # type: ignore[attr-defined] + with contextlib.suppress(Exception): + handler_entry.func((msg, raw)) + + +def _fallback_print(msg: str, raw: bool) -> None: + """Fallback when no event bus — write to real stderr (bypassing capture).""" + text = msg if raw else strip_markup(msg) + # sys.stderr may be wrapped by TaskAwareStream — get the original + stream = getattr(sys.stderr, "_original", sys.stderr) + stream.write(text + "\n") + stream.flush() + + +def strip_markup(msg: str) -> str: + """Strip Rich markup tags from a string. + + Handles escaped brackets (``\\[text]`` → ``[text]``). 
+ """ + msg = msg.replace("\\[", "\x00") + msg = re.sub(r"\[/?[^\]]*\]", "", msg) + return msg.replace("\x00", "[") diff --git a/protest/core/collector.py b/protest/core/collector.py index 74dd75d..d7c83db 100644 --- a/protest/core/collector.py +++ b/protest/core/collector.py @@ -2,9 +2,10 @@ from inspect import signature from itertools import groupby, product -from typing import TYPE_CHECKING, Annotated, Any, get_args, get_origin, get_type_hints +from typing import TYPE_CHECKING, Annotated, Any, get_args, get_origin from protest.di.decorators import get_fixture_marker, unwrap_fixture +from protest.di.hints import get_type_hints_compat from protest.di.markers import Use from protest.di.validation import _extract_from_params from protest.entities import FixtureCallable, SuitePath, TestItem, TestRegistration @@ -18,10 +19,7 @@ def _extract_use_fixtures(func: Callable[..., Any]) -> list[FixtureCallable]: """Extract fixtures referenced via Use() markers in function parameters.""" - try: - type_hints = get_type_hints(func, include_extras=True) - except Exception: - type_hints = {} + type_hints = get_type_hints_compat(func) fixtures: list[FixtureCallable] = [] for param_name in signature(func).parameters: @@ -164,6 +162,7 @@ def _expand_registration( xfail=test_reg.xfail, timeout=test_reg.timeout, retry=test_reg.retry, + is_eval=test_reg.is_eval, ) ] @@ -188,6 +187,7 @@ def _expand_registration( xfail=test_reg.xfail, timeout=test_reg.timeout, retry=test_reg.retry, + is_eval=test_reg.is_eval, ) ) diff --git a/protest/core/execution/test_executor.py b/protest/core/execution/test_executor.py index 8fa92a3..3c065f2 100644 --- a/protest/core/execution/test_executor.py +++ b/protest/core/execution/test_executor.py @@ -7,11 +7,12 @@ import time from contextlib import AsyncExitStack, asynccontextmanager from inspect import signature -from typing import TYPE_CHECKING, Any, get_type_hints +from typing import TYPE_CHECKING, Any from protest.core.collector import 
get_transitive_fixtures from protest.core.outcome import OutcomeBuilder, TestExecutionResult from protest.di.container import FixtureContainer +from protest.di.hints import get_type_hints_compat from protest.entities import ( FixtureCallable, TestItem, @@ -20,11 +21,13 @@ TestStartInfo, TestTeardownInfo, ) +from protest.entities.events import EvalPayload from protest.events.types import Event from protest.exceptions import FixtureError from protest.execution.async_bridge import ensure_async from protest.execution.capture import ( CaptureCurrentTest, + get_current_log_records, reset_current_node_id, set_current_node_id, ) @@ -112,8 +115,6 @@ async def _run_test( # noqa: PLR0912 - complex test execution flow, refactoring ) ) - start = time.perf_counter() - try: kwargs = await self._resolve_test_kwargs(item, ctx) except Exception as exc: @@ -122,13 +123,15 @@ async def _run_test( # noqa: PLR0912 - complex test execution flow, refactoring test_name=test_name, node_id=node_id, suite_path=item.suite_path, - duration=time.perf_counter() - start, + duration=0, output=buffer.getvalue(), error=exc, is_fixture_error=True, ) ) + start = time.perf_counter() + # Conditional skip (callable) - evaluated AFTER fixture resolution if item.skip and item.skip.is_conditional: try: @@ -162,26 +165,33 @@ async def _run_test( # noqa: PLR0912 - complex test execution flow, refactoring previous_errors: list[Exception] = [] error: Exception | None = None is_fixture_error = False + eval_payload: EvalPayload | None = None attempt = 1 # Initialized here; always overwritten by loop for attempt in range(1, max_attempts + 1): error = None is_fixture_error = False + eval_payload = None try: if item.timeout is not None: try: - await asyncio.wait_for( + return_value = await asyncio.wait_for( ensure_async(item.func, **kwargs), timeout=item.timeout, ) except asyncio.TimeoutError: - # Only wrap timeout from wait_for, not from test code raise asyncio.TimeoutError( f"Test exceeded timeout of 
{item.timeout}s" ) from None else: - await ensure_async(item.func, **kwargs) + return_value = await ensure_async(item.func, **kwargs) + + # For eval items: capture EvalPayload and determine pass/fail + if item.is_eval and isinstance(return_value, EvalPayload): + eval_payload = return_value + if not eval_payload.passed: + error = _build_eval_error(eval_payload) except FixtureError as exc: error = exc.original is_fixture_error = True @@ -231,6 +241,9 @@ async def _run_test( # noqa: PLR0912 - complex test execution flow, refactoring attempt=attempt, max_attempts=max_attempts, previous_errors=tuple(previous_errors), + is_eval=item.is_eval, + eval_payload=eval_payload, + log_records=tuple(get_current_log_records()), ) ) @@ -243,10 +256,7 @@ async def _resolve_test_kwargs( func_signature = signature(item.func) kwargs: dict[str, Any] = dict(item.case_kwargs) - try: - type_hints = get_type_hints(item.func, include_extras=True) - except Exception: - type_hints = {} + type_hints = get_type_hints_compat(item.func) for param_name, param in func_signature.parameters.items(): if param_name in kwargs: @@ -346,3 +356,14 @@ async def _acquire_fixture_semaphores( for _, sem in sems_sorted: await stack.enter_async_context(_semaphore_context(sem)) yield + + +def _build_eval_error(payload: EvalPayload) -> AssertionError: + """Build a descriptive AssertionError from failed eval scores.""" + parts = [] + for name, entry in payload.scores.items(): + if entry.skipped: + parts.append(f"{name}=⊘") + elif not entry.passed: + parts.append(f"{name}={entry.value}") + return AssertionError(f"{', '.join(parts)}") diff --git a/protest/core/outcome.py b/protest/core/outcome.py index b89a7bb..2563d95 100644 --- a/protest/core/outcome.py +++ b/protest/core/outcome.py @@ -1,11 +1,17 @@ """Test outcome classification and building.""" +from __future__ import annotations + from dataclasses import dataclass from enum import Enum, auto +from typing import TYPE_CHECKING, Any from protest.entities import 
SuitePath, TestCounts, TestOutcome, TestResult from protest.events.types import Event +if TYPE_CHECKING: + from protest.entities.events import EvalPayload + class OutcomeType(Enum): """Classification of test execution outcomes.""" @@ -35,13 +41,16 @@ class TestExecutionResult: attempt: int = 1 max_attempts: int = 1 previous_errors: tuple[Exception, ...] = () + is_eval: bool = False + eval_payload: EvalPayload | None = None + log_records: tuple[Any, ...] = () class OutcomeBuilder: """Builds TestOutcome from test execution results.""" def build(self, exec_result: TestExecutionResult) -> TestOutcome: - """Build a TestOutcome from execution result by classifying and constructing.""" + """Build a TestOutcome from execution result.""" outcome_type = self._classify(exec_result) match outcome_type: @@ -59,7 +68,6 @@ def build(self, exec_result: TestExecutionResult) -> TestOutcome: return self._build_fail(exec_result) def _classify(self, exec_result: TestExecutionResult) -> OutcomeType: - """Classify execution result into outcome type.""" match ( exec_result.skip_reason, exec_result.error, @@ -79,91 +87,51 @@ def _classify(self, exec_result: TestExecutionResult) -> OutcomeType: case _: return OutcomeType.FAIL - def _build_skip(self, exec_result: TestExecutionResult) -> TestOutcome: - result = TestResult( - name=exec_result.test_name, - node_id=exec_result.node_id, - suite_path=exec_result.suite_path, - skip_reason=exec_result.skip_reason, - timeout=exec_result.timeout, - attempt=exec_result.attempt, - max_attempts=exec_result.max_attempts, - previous_errors=exec_result.previous_errors, - ) - return TestOutcome(result, TestCounts(skipped=1), Event.TEST_SKIP) - - def _build_pass(self, exec_result: TestExecutionResult) -> TestOutcome: - result = TestResult( - name=exec_result.test_name, - node_id=exec_result.node_id, - suite_path=exec_result.suite_path, - duration=exec_result.duration, - output=exec_result.output, - timeout=exec_result.timeout, - attempt=exec_result.attempt, - 
max_attempts=exec_result.max_attempts, - previous_errors=exec_result.previous_errors, - ) - return TestOutcome(result, TestCounts(passed=1), Event.TEST_PASS) - - def _build_xpass(self, exec_result: TestExecutionResult) -> TestOutcome: - result = TestResult( - name=exec_result.test_name, - node_id=exec_result.node_id, - suite_path=exec_result.suite_path, - duration=exec_result.duration, - output=exec_result.output, - xfail_reason=exec_result.xfail_reason, - timeout=exec_result.timeout, - attempt=exec_result.attempt, - max_attempts=exec_result.max_attempts, - previous_errors=exec_result.previous_errors, + def _base_kwargs(self, er: TestExecutionResult) -> dict[str, object]: + """Common TestResult kwargs from an execution result.""" + return { + "name": er.test_name, + "node_id": er.node_id, + "suite_path": er.suite_path, + "duration": er.duration, + "output": er.output, + "timeout": er.timeout, + "attempt": er.attempt, + "max_attempts": er.max_attempts, + "previous_errors": er.previous_errors, + "is_eval": er.is_eval, + "eval_payload": er.eval_payload, + "log_records": er.log_records, + } + + def _build_skip(self, er: TestExecutionResult) -> TestOutcome: + kw = self._base_kwargs(er) + kw.update(duration=0, output="", skip_reason=er.skip_reason) + return TestOutcome(TestResult(**kw), TestCounts(skipped=1), Event.TEST_SKIP) # type: ignore[arg-type] + + def _build_pass(self, er: TestExecutionResult) -> TestOutcome: + return TestOutcome( + TestResult(**self._base_kwargs(er)), # type: ignore[arg-type] + TestCounts(passed=1), + Event.TEST_PASS, ) - return TestOutcome(result, TestCounts(xpassed=1), Event.TEST_XPASS) - - def _build_error(self, exec_result: TestExecutionResult) -> TestOutcome: - result = TestResult( - name=exec_result.test_name, - node_id=exec_result.node_id, - suite_path=exec_result.suite_path, - error=exec_result.error, - duration=exec_result.duration, - output=exec_result.output, - is_fixture_error=True, - timeout=exec_result.timeout, - 
attempt=exec_result.attempt, - max_attempts=exec_result.max_attempts, - previous_errors=exec_result.previous_errors, - ) - return TestOutcome(result, TestCounts(errored=1), Event.TEST_FAIL) - - def _build_xfail(self, exec_result: TestExecutionResult) -> TestOutcome: - result = TestResult( - name=exec_result.test_name, - node_id=exec_result.node_id, - suite_path=exec_result.suite_path, - error=exec_result.error, - duration=exec_result.duration, - output=exec_result.output, - xfail_reason=exec_result.xfail_reason, - timeout=exec_result.timeout, - attempt=exec_result.attempt, - max_attempts=exec_result.max_attempts, - previous_errors=exec_result.previous_errors, - ) - return TestOutcome(result, TestCounts(xfailed=1), Event.TEST_XFAIL) - - def _build_fail(self, exec_result: TestExecutionResult) -> TestOutcome: - result = TestResult( - name=exec_result.test_name, - node_id=exec_result.node_id, - suite_path=exec_result.suite_path, - error=exec_result.error, - duration=exec_result.duration, - output=exec_result.output, - timeout=exec_result.timeout, - attempt=exec_result.attempt, - max_attempts=exec_result.max_attempts, - previous_errors=exec_result.previous_errors, - ) - return TestOutcome(result, TestCounts(failed=1), Event.TEST_FAIL) + + def _build_xpass(self, er: TestExecutionResult) -> TestOutcome: + kw = self._base_kwargs(er) + kw["xfail_reason"] = er.xfail_reason + return TestOutcome(TestResult(**kw), TestCounts(xpassed=1), Event.TEST_XPASS) # type: ignore[arg-type] + + def _build_error(self, er: TestExecutionResult) -> TestOutcome: + kw = self._base_kwargs(er) + kw.update(error=er.error, is_fixture_error=True) + return TestOutcome(TestResult(**kw), TestCounts(errored=1), Event.TEST_FAIL) # type: ignore[arg-type] + + def _build_xfail(self, er: TestExecutionResult) -> TestOutcome: + kw = self._base_kwargs(er) + kw.update(error=er.error, xfail_reason=er.xfail_reason) + return TestOutcome(TestResult(**kw), TestCounts(xfailed=1), Event.TEST_XFAIL) # type: 
ignore[arg-type] + + def _build_fail(self, er: TestExecutionResult) -> TestOutcome: + kw = self._base_kwargs(er) + kw["error"] = er.error + return TestOutcome(TestResult(**kw), TestCounts(failed=1), Event.TEST_FAIL) # type: ignore[arg-type] diff --git a/protest/core/runner.py b/protest/core/runner.py index 0347c2d..f6bab5b 100644 --- a/protest/core/runner.py +++ b/protest/core/runner.py @@ -1,12 +1,15 @@ """Test runner orchestration.""" +from __future__ import annotations + import asyncio import time +from typing import TYPE_CHECKING, Any from protest.core.collector import Collector from protest.core.execution import ParallelExecutor, SuiteManager, TestExecutor from protest.core.outcome import OutcomeBuilder -from protest.core.session import ProTestSession +from protest.core.session import ProTestSession # noqa: TC001 — used at runtime from protest.core.tracker import SuiteTracker from protest.entities import ( RunResult, @@ -14,14 +17,20 @@ SessionSetupInfo, TestCounts, ) +from protest.evals.types import EvalCaseResult, EvalScore, EvalSuiteReport from protest.events.types import Event from protest.execution.capture import ( GlobalCapturePatch, + reset_event_bus, + set_event_bus, set_session_setup_capture, ) from protest.execution.context import cancellation_event from protest.execution.interrupt import InterruptHandler +if TYPE_CHECKING: + from protest.entities.events import TestResult + class TestRunner: """Executes tests with parallel support and fixture lifecycle management. 
@@ -36,6 +45,7 @@ def __init__(self, session: ProTestSession) -> None: self._interrupt_handler = InterruptHandler() self._interrupted = False self._force_interrupt_emitted = False + self._eval_results: dict[str, list[EvalCaseResult]] = {} # Extracted components self._suite_manager = SuiteManager(session) @@ -61,10 +71,23 @@ def run(self) -> RunResult: self._interrupt_handler.uninstall() loop.close() - async def _main_loop(self) -> bool: + def _collect_eval_result(self, result: TestResult) -> None: + """Internal handler: collect eval results from TEST_PASS/FAIL events.""" + if not result.is_eval or result.eval_payload is None: + return + suite_name = result.suite_path.root_name if result.suite_path else "evals" + case_result = _build_eval_case_result(result) + self._eval_results.setdefault(suite_name, []).append(case_result) + + async def _main_loop(self) -> bool: # noqa: PLR0915 """The main async loop for running tests.""" session_start = time.perf_counter() + # Register internal eval collector before tests run + self._eval_results.clear() + self._session.events.on(Event.TEST_PASS, self._collect_eval_result) + self._session.events.on(Event.TEST_FAIL, self._collect_eval_result) + collector = Collector() items = collector.collect(self._session) @@ -82,6 +105,7 @@ async def _main_loop(self) -> bool: cancel_token = cancellation_event.set( self._interrupt_handler.force_teardown_event ) + bus_token = set_event_bus(self._session.events) try: with GlobalCapturePatch(show_output=not self._session.capture): async with self._session: @@ -112,6 +136,8 @@ async def _main_loop(self) -> bool: ): suite_result = self._suite_manager.build_result(suite_path) await self._session.events.emit(Event.SUITE_END, suite_result) + # Emit EVAL_SUITE_END for eval suites + await self._emit_eval_suite_end(suite_path) await self._session.events.emit(Event.SESSION_TEARDOWN_START) finally: @@ -124,6 +150,7 @@ async def _main_loop(self) -> bool: await 
self._session.events.emit(Event.SESSION_INTERRUPTED, True) self._force_interrupt_emitted = True cancellation_event.reset(cancel_token) + reset_event_bus(bus_token) if self._interrupt_handler.should_stop_new_tests: self._interrupted = True @@ -151,8 +178,61 @@ async def _main_loop(self) -> bool: await self._session.events.wait_pending() await self._session.events.emit(Event.SESSION_COMPLETE, session_result) + # Unregister eval collector + self._session.events.off(Event.TEST_PASS, self._collect_eval_result) + self._session.events.off(Event.TEST_FAIL, self._collect_eval_result) + return ( total_counts.failed == 0 and total_counts.errored == 0 and total_counts.xpassed == 0 ) + + async def _emit_eval_suite_end(self, suite_path: Any) -> None: + """Emit EVAL_SUITE_END if this suite_path corresponds to an eval suite.""" + suite_name = ( + suite_path.root_name + if hasattr(suite_path, "root_name") + else str(suite_path) + ) + eval_cases = self._eval_results.get(suite_name) + if not eval_cases: + return + report = EvalSuiteReport( + suite_name=suite_name, + cases=tuple(eval_cases), + duration=sum(c.duration for c in eval_cases), + ) + await self._session.events.emit(Event.EVAL_SUITE_END, report) + + +def _build_eval_case_result(result: TestResult) -> EvalCaseResult: + """Build EvalCaseResult from a TestResult with eval_payload.""" + payload = result.eval_payload + assert payload is not None + return EvalCaseResult( + case_name=payload.case_name or "", + node_id=result.node_id, + scores=tuple( + EvalScore( + name=name, + value=entry.value, + ) + for name, entry in payload.scores.items() + ), + duration=payload.task_duration, + passed=not (result.error is not None or not payload.passed), + inputs=payload.inputs, + output=payload.output, + expected_output=payload.expected_output, + case_hash=payload.case_hash, + eval_hash=payload.eval_hash, + task_input_tokens=payload.task_input_tokens, + task_output_tokens=payload.task_output_tokens, + task_cost=payload.task_cost, + 
judge_call_count=payload.judge_call_count, + judge_input_tokens=payload.judge_input_tokens, + judge_output_tokens=payload.judge_output_tokens, + judge_cost=payload.judge_cost, + is_error=result.is_fixture_error, + ) diff --git a/protest/core/session.py b/protest/core/session.py index 778dbb3..efef4fb 100644 --- a/protest/core/session.py +++ b/protest/core/session.py @@ -1,18 +1,20 @@ from __future__ import annotations -from typing import TYPE_CHECKING, TypeVar +from typing import TYPE_CHECKING, Any, TypeVar if TYPE_CHECKING: from collections.abc import Callable + from pathlib import Path from types import TracebackType from protest.compat import Self - from protest.core.suite import ProTestSuite from protest.entities import FixtureCallable + from protest.evals.types import JudgeInfo, ModelInfo from protest.plugin import PluginBase, PluginContext from protest.cache.plugin import CachePlugin from protest.cache.storage import CacheStorage +from protest.core.suite import ProTestSuite from protest.di.container import FixtureContainer from protest.di.decorators import get_fixture_marker, unwrap_fixture from protest.entities import ( @@ -20,17 +22,22 @@ FixtureScope, Retry, Skip, + SuiteKind, TestRegistration, Xfail, normalize_retry, normalize_skip, normalize_xfail, ) +from protest.evals.history import EvalHistoryPlugin +from protest.evals.results_writer import EvalResultsWriter +from protest.evals.wrapper import make_eval_wrapper from protest.events.bus import EventBus from protest.events.types import Event from protest.exceptions import InvalidMaxConcurrencyError from protest.execution.capture import set_session_teardown_capture from protest.filters.keyword import KeywordFilterPlugin +from protest.filters.kind import KindFilterPlugin from protest.filters.suite import SuiteFilterPlugin from protest.reporting.ascii import AsciiReporter from protest.reporting.ctrf import CTRFReporter @@ -54,7 +61,13 @@ class ProTestSession: concurrency: Number of parallel test workers 
(default: 1). """ - def __init__(self, concurrency: int = 1) -> None: + def __init__( + self, + concurrency: int = 1, + history: bool = False, + history_dir: Path | None = None, + metadata: dict[str, Any] | None = None, + ) -> None: if concurrency < 1: raise InvalidMaxConcurrencyError(concurrency) @@ -72,6 +85,12 @@ def __init__(self, concurrency: int = 1) -> None: self._capture: bool = True self._setup_duration: float = 0 self._teardown_duration: float = 0 + self._history = history + self._history_dir = history_dir + self._metadata: dict[str, Any] = dict(metadata) if metadata else {} + self._eval_model: ModelInfo | None = None # set by EvalSession + self._eval_judge: JudgeInfo | None = None # set by EvalSession + self._eval_judge_instance: Any = None # set by EvalSession async def resolve_autouse(self) -> None: """Resolve all session autouse fixtures at session start.""" @@ -104,6 +123,18 @@ def capture(self) -> bool: def capture(self, value: bool) -> None: self._capture = value + @property + def history(self) -> bool: + return self._history + + @property + def history_dir(self) -> Path | None: + return self._history_dir + + @property + def metadata(self) -> dict[str, Any]: + return self._metadata + @property def setup_duration(self) -> float: """Duration of session setup (available after resolve_autouse).""" @@ -151,6 +182,7 @@ def test( skip_reason: str = "Skipped", xfail: bool | str | Xfail | None = None, retry: int | Retry | None = None, + is_eval: bool = False, ) -> Callable[[FuncT], FuncT]: def decorator(func: FuncT) -> FuncT: if timeout is not None and timeout < 0: @@ -168,21 +200,63 @@ def decorator(func: FuncT) -> FuncT: xfail=norm_xfail, timeout=timeout, retry=norm_retry, + is_eval=is_eval, ) ) return func return decorator + def eval( + self, + evaluators: list[Any] | None = None, + expected_key: str = "expected", + tags: list[str] | None = None, + timeout: float | None = None, + name: str | None = None, + model: Any = None, + ) -> Callable[[FuncT], 
FuncT]: + """Register a scored eval test. + + Creates an implicit eval suite named after the function. + The decorated function's return value is passed to evaluators. + Use with ForEach/From for parametrization:: + + @session.eval(evaluators=[my_scorer], model=ModelInfo(name="qwen")) + async def my_eval(case: Annotated[dict, From(cases)]) -> str: + return await run(case["q"]) + """ + + def decorator(func: FuncT) -> FuncT: + suite_name = name or func.__name__ + suite_meta: dict[str, Any] = {} + resolved_model = model or self._eval_model + if resolved_model: + suite_meta["model"] = resolved_model.name + suite_meta["provider"] = resolved_model.provider + suite = ProTestSuite( + name=suite_name, + tags=list(tags or []), + kind=SuiteKind.EVAL, + metadata=suite_meta, + ) + wrapper = make_eval_wrapper( + func, + evaluators or [], + expected_key, + judge=self._eval_judge_instance, + ) + suite.test(tags=tags, timeout=timeout, is_eval=True)(wrapper) + self.add_suite(suite) + return func + + return decorator + def add_suite(self, suite: ProTestSuite) -> None: """Add a suite to this session.""" suite._attach_to_session(self) self._suites.append(suite) - def include_suite(self, suite: ProTestSuite) -> None: - """Alias for add_suite (backward compatibility).""" - self.add_suite(suite) - def bind( self, fn: FixtureCallable, @@ -246,6 +320,7 @@ def default_plugin_classes() -> list[type[PluginBase]]: TagFilterPlugin, SuiteFilterPlugin, KeywordFilterPlugin, + KindFilterPlugin, RichReporter, AsciiReporter, CTRFReporter, @@ -256,6 +331,12 @@ def register_default_plugins(self) -> None: """Register all standard ProTest plugins for CLI discovery.""" for plugin_class in self.default_plugin_classes(): self.use(plugin_class) + if self._history: + from protest.history.plugin import ( # noqa: PLC0415 — conditional + HistoryPlugin, + ) + + self.register_plugin(HistoryPlugin(history_dir=self._history_dir)) @property def plugin_classes(self) -> list[type[PluginBase]]: @@ -294,6 +375,32 @@ def 
activate_plugins(self, ctx: PluginContext) -> None: if instance is not None: self.register_plugin(instance) + # Auto-wire eval support if any suite has kind="eval" + if any(s.kind == SuiteKind.EVAL for s in self._suites): + self._wire_eval_support() + + def _wire_eval_support(self) -> None: + """Wire eval history + results writer plugins (no EvalPlugin).""" + + judge_dict = None + if self._eval_judge: + judge_dict = { + "name": self._eval_judge.name, + "provider": self._eval_judge.provider, + "evaluators": list(self._eval_judge.evaluators), + } + + history = EvalHistoryPlugin( + history_dir=self._history_dir, + model=self._eval_model, + judge=judge_dict, + metadata=self._metadata, + ) + self.register_plugin(history) + + writer = EvalResultsWriter(history_dir=self._history_dir) + self.register_plugin(writer) + async def __aenter__(self) -> Self: self._register_fixtures() await self._resolver.__aenter__() @@ -345,7 +452,7 @@ async def __aexit__( exc_val: BaseException | None, exc_tb: TracebackType | None, ) -> bool: - import time + import time # noqa: PLC0415 — only needed in __aexit__ teardown_start = time.perf_counter() set_session_teardown_capture(True) diff --git a/protest/core/suite.py b/protest/core/suite.py index 1176842..99b4fa2 100644 --- a/protest/core/suite.py +++ b/protest/core/suite.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import TYPE_CHECKING, TypeVar +from typing import TYPE_CHECKING, Any, TypeVar from protest.di.decorators import unwrap_fixture @@ -14,6 +14,7 @@ FixtureRegistration, Retry, Skip, + SuiteKind, SuitePath, TestRegistration, Xfail, @@ -21,6 +22,7 @@ normalize_skip, normalize_xfail, ) +from protest.evals.wrapper import make_eval_wrapper from protest.exceptions import ConcurrencyMismatchError, InvalidMaxConcurrencyError FuncT = TypeVar("FuncT", bound="Callable[..., object]") @@ -42,18 +44,22 @@ class ProTestSuite: description: Optional description for documentation purposes. 
""" - def __init__( + def __init__( # noqa: PLR0913 self, name: str, max_concurrency: int | None = None, tags: list[str] | None = None, description: str | None = None, + kind: SuiteKind = SuiteKind.TEST, + metadata: dict[str, Any] | None = None, ) -> None: if max_concurrency is not None and max_concurrency < 1: raise InvalidMaxConcurrencyError(max_concurrency) self._name = name + self._kind = kind self._description = description + self._metadata: dict[str, Any] = dict(metadata) if metadata else {} self._session: ProTestSession | None = None self._parent_suite: ProTestSuite | None = None self._tests: list[TestRegistration] = [] @@ -70,6 +76,14 @@ def name(self) -> str: def description(self) -> str | None: return self._description + @property + def kind(self) -> SuiteKind: + return self._kind + + @property + def suite_metadata(self) -> dict[str, Any]: + return self._metadata + @property def full_path(self) -> SuitePath: """Return hierarchical path: Parent::Child::GrandChild.""" @@ -122,6 +136,7 @@ def test( # noqa: PLR0913 - test decorator requires flexible params skip_reason: str = "Skipped", xfail: bool | str | Xfail | None = None, retry: int | Retry | None = None, + is_eval: bool = False, ) -> Callable[[FuncT], FuncT]: def decorator(func: FuncT) -> FuncT: if timeout is not None and timeout < 0: @@ -139,12 +154,35 @@ def decorator(func: FuncT) -> FuncT: xfail=norm_xfail, timeout=timeout, retry=norm_retry, + is_eval=is_eval, ) ) return func return decorator + def eval( + self, + evaluators: list[Any] | None = None, + expected_key: str = "expected", + tags: list[str] | None = None, + timeout: float | None = None, + judge: Any = None, + ) -> Callable[[FuncT], FuncT]: + """Register a scored eval test on this suite.""" + + def decorator(func: FuncT) -> FuncT: + wrapper = make_eval_wrapper( + func, + evaluators or [], + expected_key, + judge=judge, + ) + self.test(tags=tags, timeout=timeout, is_eval=True)(wrapper) + return func + + return decorator + def add_suite(self, 
suite: ProTestSuite) -> None: """Add a child suite. Child can access parent's fixtures.""" parent_effective = self.effective_max_concurrency diff --git a/protest/di/container.py b/protest/di/container.py index 8ab6e49..3a85ae0 100644 --- a/protest/di/container.py +++ b/protest/di/container.py @@ -11,7 +11,6 @@ Any, get_args, get_origin, - get_type_hints, overload, ) @@ -23,6 +22,7 @@ unwrap_fixture, ) from protest.di.factory import FixtureFactory +from protest.di.hints import get_type_hints_compat from protest.di.markers import Use from protest.di.proxy import FixtureErrorWrapper from protest.entities import ( @@ -741,8 +741,9 @@ async def _run_teardown_interruptible( """Run exit stack teardown, interruptible by cancellation event. Returns True if cancelled (should abort), False if completed normally. - Teardown runs in a thread pool so sync blocking code doesn't freeze - the event loop, allowing us to detect and respond to cancellation. + Teardown runs on the SAME event loop as fixture setup — creating a + new loop would break async resources (drivers, connections) that hold + references to the original loop. 
""" if interrupt_event is None: await exit_stack.__aexit__(exc_type, exc_val, exc_tb) @@ -751,23 +752,10 @@ async def _run_teardown_interruptible( if interrupt_event.is_set(): return True - # Run teardown in thread pool so sync code doesn't block event loop - loop = asyncio.get_running_loop() - - def run_sync_teardown() -> None: - # Create a new event loop for the thread to run async teardowns - new_loop = asyncio.new_event_loop() - try: - new_loop.run_until_complete( - exit_stack.__aexit__(exc_type, exc_val, exc_tb) - ) - finally: - new_loop.close() - - async def run_in_thread() -> None: - await loop.run_in_executor(None, run_sync_teardown) - - teardown_task = asyncio.create_task(run_in_thread()) + # Run teardown on the same loop, race with cancellation + teardown_task = asyncio.create_task( + exit_stack.__aexit__(exc_type, exc_val, exc_tb) + ) wait_cancel = asyncio.create_task(interrupt_event.wait()) done, _ = await asyncio.wait( @@ -793,10 +781,7 @@ def _analyze_and_store_dependencies( actual_func = unwrap_fixture(func) func_signature = signature(actual_func) - try: - type_hints = get_type_hints(actual_func, include_extras=True) - except Exception: - type_hints = {} + type_hints = get_type_hints_compat(actual_func) dependencies: dict[str, FixtureCallable] = {} for param_name, param in func_signature.parameters.items(): diff --git a/protest/di/hints.py b/protest/di/hints.py new file mode 100644 index 0000000..bd6a89b --- /dev/null +++ b/protest/di/hints.py @@ -0,0 +1,57 @@ +"""Type hints resolution with PEP 563 / TYPE_CHECKING compatibility. + +Shared by the core DI system and evals runner. Handles two failure modes: + +1. Local fixtures — ``from __future__ import annotations`` stringifies + annotations; names defined in local scopes aren't in ``func.__globals__``. + Fix: collect locals from the call stack. + +2. TYPE_CHECKING-only types — e.g. ``AsyncDriver`` imported only under + ``if TYPE_CHECKING:``. Fix: substitute ``Any`` for each unresolvable + name. 
The type itself is irrelevant for DI; only the ``Use(...)`` + marker inside ``Annotated[...]`` matters. +""" + +from __future__ import annotations + +import contextlib +import inspect +import re +from typing import Any, get_type_hints + + +def get_type_hints_compat(func: Any) -> dict[str, Any]: + """Resolve type hints with PEP 563 / TYPE_CHECKING fallbacks.""" + with contextlib.suppress(Exception): + return get_type_hints(func, include_extras=True) + + # Build a namespace from the entire call stack (covers local fixtures). + localns: dict[str, Any] = {} + with contextlib.suppress(Exception): + for frame_info in inspect.stack(): + localns.update(frame_info.frame.f_locals) + + with contextlib.suppress(Exception): + return get_type_hints(func, localns=localns, include_extras=True) + + # TYPE_CHECKING fallback: substitute Any for unresolvable names. + return _get_type_hints_substituting_any(func, localns) + + +def _get_type_hints_substituting_any( + func: Any, + localns: dict[str, Any], +) -> dict[str, Any]: + """Retry get_type_hints, replacing each NameError'd name with Any.""" + localns = dict(localns) + for _ in range(20): + try: + return get_type_hints(func, localns=localns, include_extras=True) + except NameError as exc: + match = re.search(r"name '(\w+)' is not defined", str(exc)) + if not match: + break + localns[match.group(1)] = Any + except Exception: + break + return {} diff --git a/protest/di/validation.py b/protest/di/validation.py index 2d6cd18..1026bca 100644 --- a/protest/di/validation.py +++ b/protest/di/validation.py @@ -3,8 +3,9 @@ from __future__ import annotations from inspect import signature -from typing import TYPE_CHECKING, Annotated, Any, get_args, get_origin, get_type_hints +from typing import TYPE_CHECKING, Annotated, Any, get_args, get_origin +from protest.di.hints import get_type_hints_compat from protest.di.markers import ForEach, From from protest.exceptions import ParameterizedFixtureError from protest.utils import get_callable_name @@ 
-15,10 +16,7 @@ def _extract_from_params(func: Callable[..., Any]) -> dict[str, ForEach[Any]]: """Extract parameters annotated with From(source).""" - try: - type_hints = get_type_hints(func, include_extras=True) - except Exception: - type_hints = {} + type_hints = get_type_hints_compat(func) result: dict[str, ForEach[Any]] = {} for param_name in signature(func).parameters: diff --git a/protest/entities/__init__.py b/protest/entities/__init__.py index ec91eb9..3016ebb 100644 --- a/protest/entities/__init__.py +++ b/protest/entities/__init__.py @@ -4,12 +4,15 @@ FixtureMarker, FixtureRegistration, FixtureScope, + SuiteKind, TestItem, TestOutcome, TestRegistration, format_fixture_scope, ) from protest.entities.events import ( + EvalPayload, + EvalScoreEntry, FixtureInfo, HandlerInfo, RunResult, @@ -31,6 +34,8 @@ from protest.entities.xfail import Xfail, normalize_xfail __all__ = [ + "EvalPayload", + "EvalScoreEntry", "Fixture", "FixtureCallable", "FixtureInfo", @@ -44,6 +49,7 @@ "SessionResult", "SessionSetupInfo", "Skip", + "SuiteKind", "SuitePath", "SuiteResult", "SuiteSetupInfo", diff --git a/protest/entities/core.py b/protest/entities/core.py index 465c5d3..5a8c680 100644 --- a/protest/entities/core.py +++ b/protest/entities/core.py @@ -1,7 +1,7 @@ from __future__ import annotations from dataclasses import dataclass, field -from enum import Enum +from enum import Enum, StrEnum from typing import TYPE_CHECKING, Any, TypeAlias if TYPE_CHECKING: @@ -20,6 +20,13 @@ FixtureCallable: TypeAlias = "Callable[..., Any]" +class SuiteKind(StrEnum): + """Kind of suite — determines behavior (eval wiring, history, reporting).""" + + TEST = "test" + EVAL = "eval" + + class FixtureScope(Enum): """Scope level for fixtures.""" @@ -49,6 +56,7 @@ class TestRegistration: xfail: Xfail | None = None timeout: float | None = None retry: Retry | None = None + is_eval: bool = False @dataclass(frozen=True, slots=True) @@ -111,6 +119,7 @@ class TestItem: xfail: Xfail | None = None timeout: 
@dataclass(frozen=True, slots=True)
class EvalScoreEntry:
    """A single score entry from an evaluator."""

    # Raw value: bool = verdict, float = metric, str = reason/label.
    value: float | bool | str
    # Whether this entry counts toward pass (bool verdicts drive this).
    passed: bool = True
    # True when the evaluator did not actually run for this case.
    skipped: bool = False


@dataclass(frozen=True, slots=True)
class EvalPayload:
    """Structured payload for eval results, carried on TestResult."""

    case_name: str
    passed: bool
    # Seconds spent in the task under evaluation (not in scoring).
    task_duration: float
    inputs: Any = None
    output: Any = None
    expected_output: Any = None
    # Evaluator name -> score entry for this case.
    scores: dict[str, EvalScoreEntry] = field(default_factory=dict)
    # Content hashes (see protest.evals.hashing): case identity vs scoring config.
    case_hash: str = ""
    eval_hash: str = ""
    # Usage/cost accounting for the task under test...
    task_input_tokens: int = 0
    task_output_tokens: int = 0
    task_cost: float = 0.0
    # ...and for any LLM-judge calls made while scoring.
    judge_call_count: int = 0
    judge_input_tokens: int = 0
    judge_output_tokens: int = 0
    judge_cost: float = 0.0
= () @dataclass(frozen=True, slots=True) diff --git a/protest/entities/suite_path.py b/protest/entities/suite_path.py index 38c78a2..4b7223e 100644 --- a/protest/entities/suite_path.py +++ b/protest/entities/suite_path.py @@ -58,6 +58,11 @@ def lower(self) -> str: """Return lowercase string representation for case-insensitive comparison.""" return self._path.lower() + @property + def root_name(self) -> str: + """Return the top-level suite name: 'A::B::C' -> 'A'.""" + return self.parts[0] if self.parts else "" + def __str__(self) -> str: return self._path diff --git a/protest/evals/__init__.py b/protest/evals/__init__.py new file mode 100644 index 0000000..d90b8f4 --- /dev/null +++ b/protest/evals/__init__.py @@ -0,0 +1,54 @@ +"""ProTest evals — native eval support.""" + +from protest.evals.evaluator import ( + EvalCase, + EvalContext, + Metric, + Reason, + ShortCircuit, + Verdict, + evaluator, +) +from protest.evals.types import ( + EvalCaseResult, + EvalScore, + EvalSuiteReport, + Judge, + JudgeInfo, + JudgeResponse, + ModelInfo, + ScoreStats, + TaskResult, +) + +__all__ = [ + "EvalCase", + "EvalCaseResult", + "EvalContext", + "EvalScore", + "EvalSession", + "EvalSuiteReport", + "Judge", + "JudgeInfo", + "JudgeResponse", + "Metric", + "ModelInfo", + "Reason", + "ScoreStats", + "ShortCircuit", + "TaskResult", + "Verdict", + "evaluator", +] + + +def __getattr__(name: str) -> object: + # EvalSession imports protest.core.session which imports reporters, + # and reporters import protest.evals.types — eagerly importing + # EvalSession here would create a circular import chain. 
def __getattr__(name: str) -> object:
    # PEP 562 lazy attribute hook. EvalSession imports protest.core.session,
    # which imports reporters, which import protest.evals.types — importing
    # it eagerly here would close a circular import chain.
    if name != "EvalSession":
        msg = f"module {__name__!r} has no attribute {name!r}"
        raise AttributeError(msg)
    from protest.evals.session import EvalSession  # noqa: PLC0415 — circular import

    return EvalSession
InputT = TypeVar("InputT")  # type of the case inputs handed to the task
OutputT = TypeVar("OutputT")  # type of the task's return value
T = TypeVar("T")  # typed output of a judge call


@dataclass
class EvalContext(Generic[InputT, OutputT]):
    """Context passed to evaluator functions.

    Dual role: read-only DTO (inputs, output, expected) + mutable accumulator
    for judge call stats (tokens, cost, call count). One instance per case,
    shared sequentially across evaluators, discarded after scoring.

    Note: judge stats accumulate via ctx.judge() side-effects. If evaluators
    are ever parallelized within a case, the accumulators will need isolation.
    """

    name: str
    inputs: InputT
    output: OutputT
    expected_output: OutputT | None
    metadata: Any
    duration: float
    # Live judge instance; hidden from repr, injected by the runner.
    _judge: Judge | None = field(default=None, repr=False)
    # Accumulators below are runtime-internal: init=False keeps them out of
    # the constructor and repr=False keeps case output readable.
    _judge_call_count: int = field(default=0, repr=False, init=False)
    _judge_input_tokens: int = field(default=0, repr=False, init=False)
    _judge_output_tokens: int = field(default=0, repr=False, init=False)
    _judge_cost: float = field(default=0.0, repr=False, init=False)

    async def judge(self, prompt: str, output_type: type[T]) -> T:
        """Call the configured LLM judge and return the typed output.

        Tokens and cost from JudgeResponse are accumulated internally
        and flow to EvalPayload for history/display. The evaluator
        only sees the unwrapped output.

        Raises RuntimeError if no judge was configured on the session.
        """
        if self._judge is None:
            raise RuntimeError(
                f"Evaluator for case '{self.name}' called ctx.judge() but no "
                "judge is configured. Pass judge= to EvalSession()."
            )
        # Incremented before the await, so a failing judge call is still counted.
        self._judge_call_count += 1
        response = await self._judge.judge(prompt, output_type)
        # Usage fields are optional on JudgeResponse; only add what was reported.
        if response.input_tokens is not None:
            self._judge_input_tokens += response.input_tokens
        if response.output_tokens is not None:
            self._judge_output_tokens += response.output_tokens
        if response.cost is not None:
            self._judge_cost += response.cost
        return response.output

    @property
    def judge_call_count(self) -> int:
        """Number of ctx.judge() calls made so far for this case."""
        return self._judge_call_count

    @property
    def judge_input_tokens(self) -> int:
        """Total input tokens reported by judge calls for this case."""
        return self._judge_input_tokens

    @property
    def judge_output_tokens(self) -> int:
        """Total output tokens reported by judge calls for this case."""
        return self._judge_output_tokens

    @property
    def judge_cost(self) -> float:
        """Total cost reported by judge calls for this case."""
        return self._judge_cost


@dataclass
class EvalCase:
    """Typed container for eval case data in ForEach.

    Usage::

        cases = ForEach([
            EvalCase(inputs="Who is Marie?", expected="Marie, Resistance", name="lookup"),
            EvalCase(inputs="Who is Pierre?", expected="Pierre, arrest"),
        ])

        @session.eval(evaluators=[contains_facts])
        def my_eval(case: Annotated[EvalCase, From(cases)]) -> str:
            return ask(case.inputs)
    """

    inputs: Any
    expected: Any = None
    name: str = ""
    # NOTE(review): presumably merged with the eval-level evaluators by the
    # runner — confirm against the scoring pipeline.
    evaluators: list[Any] = field(default_factory=list)
    # Free-form extra data; likely surfaced as ctx.metadata — TODO confirm.
    metadata: dict[str, Any] = field(default_factory=dict)

    def __repr__(self) -> str:
        # Display name in reports; falls back to the raw inputs.
        return self.name or f"EvalCase({self.inputs!r})"
class Metric:
    """Annotate a float/int field as a metric for stats aggregation."""


class Verdict:
    """Annotate a bool field as a verdict for pass/fail."""


class Reason:
    """Annotate a str field as a reason displayed on failure."""


# The three marker kinds recognized in Annotated[...] metadata.
_MARKER_TYPES = (Metric, Verdict, Reason)


def _is_score_marker(meta: Any) -> bool:
    """True if *meta* marks a score field: a marker class or an instance of one."""
    if isinstance(meta, type):
        return issubclass(meta, _MARKER_TYPES)
    return isinstance(meta, _MARKER_TYPES)


def extract_scores_from_result(result: Any, evaluator_name: str) -> list[Any]:
    """Extract EvalScore instances from an evaluator result.

    For bool returns: a single verdict named after the evaluator.
    For dataclass returns: only fields annotated with Metric/Verdict/Reason —
    either the marker class itself (``Annotated[float, Metric]``) or an
    instance (``Annotated[float, Metric()]``) — are extracted. Unannotated
    fields are ignored (free metadata).

    Raises:
        TypeError: If result is not bool or dataclass.
    """
    if isinstance(result, bool):
        return [EvalScore(name=evaluator_name, value=result)]

    if dataclasses.is_dataclass(result) and not isinstance(result, type):
        scores = []
        # include_extras keeps Annotated metadata available for inspection.
        hints = get_type_hints(type(result), include_extras=True)
        for f in dataclasses.fields(result):
            ann = hints.get(f.name)
            if ann is None or get_origin(ann) is not Annotated:
                continue
            # get_args(ann)[0] is the field type; the rest is metadata.
            if any(_is_score_marker(meta) for meta in get_args(ann)[1:]):
                scores.append(EvalScore(name=f.name, value=getattr(result, f.name)))
        return scores

    type_name = type(result).__name__
    raise TypeError(f"Evaluator must return bool or dataclass, got {type_name}")
def evaluator(fn: Any) -> Any:
    """Decorator that turns a function into a protest evaluator.

    The decorated function can be called two ways:

    1. ``evaluator_fn(ctx)`` — evaluate directly
    2. ``evaluator_fn(keyword=value, ...)`` — returns a bound evaluator (partial)

    This is just ``functools.partial`` with nicer ergonomics: when the first
    positional argument is an ``EvalContext``, the function evaluates.
    Otherwise keyword arguments are bound and the result is a new callable
    expecting only ``ctx``.
    """
    # Anything beyond the ctx parameter counts as bindable configuration.
    accepts_config = len(inspect.signature(fn).parameters) > 1

    @functools.wraps(fn)
    def wrapper(*args: Any, **kwargs: Any) -> Any:
        if args and isinstance(args[0], EvalContext):
            # Direct evaluation: ctx arrived as the first positional arg.
            return fn(*args, **kwargs)
        if accepts_config and kwargs:
            # Binding call: freeze config kwargs, hand back a partial that
            # still waits for ctx. Copy the names so async detection keeps
            # working on the partial.
            bound = functools.partial(fn, **kwargs)
            bound.__name__ = fn.__name__  # type: ignore[attr-defined]
            bound.__qualname__ = fn.__qualname__  # type: ignore[attr-defined]
            return bound
        if not accepts_config and not args and not kwargs:
            # Bare call on a parameter-less evaluator: it IS the evaluator.
            return fn
        return fn(*args, **kwargs)

    return wrapper
@dataclass(frozen=True, slots=True)
class ContainsKeywordsResult:
    keyword_recall: Annotated[float, Metric]
    all_keywords_present: Annotated[bool, Verdict]


@dataclass(frozen=True, slots=True)
class DoesNotContainResult:
    no_forbidden_words: Annotated[bool, Verdict]


@dataclass(frozen=True, slots=True)
class MaxLengthResult:
    conciseness: Annotated[float, Metric]
    within_limit: Annotated[bool, Verdict]


@dataclass(frozen=True, slots=True)
class JsonValidResult:
    valid_json: Annotated[bool, Verdict]
    has_required_keys: Annotated[bool, Verdict]


@dataclass(frozen=True, slots=True)
class WordOverlapResult:
    overlap: Annotated[float, Metric]


@evaluator
def contains_keywords(
    ctx: EvalContext[Any, str], keywords: list[str], min_recall: float = 0.0
) -> ContainsKeywordsResult:
    """Check that the output contains expected keywords (case-insensitive)."""
    haystack = ctx.output.lower()
    hits = [kw for kw in keywords if kw.lower() in haystack]
    recall = len(hits) / len(keywords) if keywords else 1.0
    # With an explicit min_recall the verdict is a threshold check;
    # otherwise every keyword must be present.
    if min_recall > 0:
        verdict = recall >= min_recall
    else:
        verdict = len(hits) == len(keywords)
    return ContainsKeywordsResult(keyword_recall=recall, all_keywords_present=verdict)


@evaluator
def contains_expected(ctx: EvalContext[Any, str], case_sensitive: bool = False) -> bool:
    """Check that the output contains expected_output as a substring."""
    expected = ctx.expected_output
    if expected is None:
        # No expectation recorded — nothing to check, treat as passing.
        return True
    if not case_sensitive:
        return expected.lower() in ctx.output.lower()
    return expected in ctx.output
@evaluator
def not_empty(ctx: EvalContext[Any, Any]) -> bool:
    """Check that the output is not empty or whitespace-only.

    Non-string outputs pass as long as they are not None.
    """
    if ctx.output is None:
        return False
    if isinstance(ctx.output, str):
        return len(ctx.output.strip()) > 0
    return True


@evaluator
def max_length(ctx: EvalContext[Any, str], max_chars: int = 500) -> MaxLengthResult:
    """Check that the output doesn't exceed a character limit.

    ``conciseness`` is 1.0 at or under the limit, then decays as the
    output grows past it (max_chars / length, clamped to 1.0).
    """
    length = len(ctx.output)
    return MaxLengthResult(
        conciseness=min(1.0, max_chars / max(length, 1)),
        within_limit=length <= max_chars,
    )


@evaluator
def min_length(ctx: EvalContext[Any, str], min_chars: int = 1) -> bool:
    """Check that the output meets a minimum length."""
    return len(ctx.output) >= min_chars


@evaluator
def matches_regex(ctx: EvalContext[Any, str], pattern: str, flags: int = 0) -> bool:
    """Check that the output matches a regex pattern (re.search semantics)."""
    return bool(re.search(pattern, ctx.output, flags))


@evaluator
def json_valid(
    ctx: EvalContext[Any, str], required_keys: list[str] | None = None
) -> JsonValidResult:
    """Check that the output is valid JSON, optionally with required keys.

    When ``required_keys`` is given, the parsed value must be a JSON
    object containing every listed key; a valid non-object value (list,
    scalar) fails the key check instead of silently passing.
    """
    if required_keys is None:
        required_keys = []
    try:
        parsed = json_module.loads(ctx.output)
    except (json_module.JSONDecodeError, TypeError):
        return JsonValidResult(valid_json=False, has_required_keys=False)

    if required_keys:
        # Bug fix: previously a non-dict JSON value passed the key check
        # because the isinstance guard short-circuited the condition to True.
        has_keys = isinstance(parsed, dict) and all(k in parsed for k in required_keys)
    else:
        has_keys = True
    return JsonValidResult(valid_json=True, has_required_keys=has_keys)
HASH_LENGTH = 12


class CanonicalError(TypeError):
    """Raised when an object cannot be converted to a canonical form."""


def compute_case_hash(inputs: Any, expected_output: Any) -> str:
    """Hash the case content (inputs + expected_output)."""
    payload = {"inputs": _canonical(inputs), "expected": _canonical(expected_output)}
    return _hash(payload)


def compute_eval_hash(evaluators: list[Any]) -> str:
    """Hash the scoring config (evaluators only)."""
    return _hash({"evaluators": [_canonical(e) for e in evaluators]})


def _hash(data: Any) -> str:
    # sort_keys makes the serialization independent of dict insertion order.
    serialized = json.dumps(data, sort_keys=True)
    return hashlib.sha256(serialized.encode()).hexdigest()[:HASH_LENGTH]


def _canonical(obj: Any) -> Any:  # noqa: PLR0911
    """Convert an object to a canonical JSON-serializable form.

    Resolution order:
    1. Primitives, list, tuple, dict — native support
    2. ``evaluator_identity()`` — explicit, user-controlled
    3. Dataclass / functools.partial / callable — introspection fallback
    4. Anything else → CanonicalError
    """
    # Primitives pass through untouched; containers recurse.
    if obj is None or isinstance(obj, (bool, int, float, str)):
        return obj
    if isinstance(obj, (list, tuple)):
        return [_canonical(entry) for entry in obj]
    if isinstance(obj, dict):
        return {str(key): _canonical(val) for key, val in sorted(obj.items())}

    # A user-supplied identity always wins over introspection.
    if hasattr(obj, "evaluator_identity"):
        return _canonical(obj.evaluator_identity())

    # Dataclasses — public fields only (skip _ prefixed runtime internals).
    if dataclasses.is_dataclass(obj) and not isinstance(obj, type):
        public_fields = {
            f.name: _canonical(getattr(obj, f.name))
            for f in dataclasses.fields(obj)
            if not f.name.startswith("_")
        }
        return {"__type__": type(obj).__qualname__, **public_fields}

    # functools.partial — qualname of the target + bound arguments.
    if isinstance(obj, functools.partial):
        return {
            "fn": _fn_qualname(obj.func),
            "args": _canonical(list(obj.args)) if obj.args else [],
            "kwargs": _canonical(dict(obj.keywords)) if obj.keywords else {},
        }

    # Plain callable — qualname only (implementation changes don't rehash).
    if callable(obj):
        qualified = _fn_qualname(obj)
        if qualified is not None:
            return {"fn": qualified}

    raise CanonicalError(
        f"Cannot canonicalize {type(obj).__name__!r}. "
        f"Implement evaluator_identity() or use a supported type "
        f"(primitives, list, dict, dataclass, callable)."
    )


def _fn_qualname(fn: Any) -> str | None:
    """Extract a stable qualified name from a callable."""
    return getattr(fn, "__qualname__", None) or getattr(fn, "__name__", None)
def _build_entry(
    reports: dict[str, EvalSuiteReport],
    model: ModelInfo | None,
    judge: dict[str, Any] | None,
    metadata: dict[str, Any] | None = None,
    all_suite_metadata: dict[str, dict[str, Any]] | None = None,
) -> dict[str, Any]:
    """Build a complete history entry covering all suites in the session.

    The returned dict is persisted as one JSONL line — its key layout is a
    storage contract consumed by history loading/comparison.
    """
    suites_data: dict[str, Any] = {}
    all_score_stats: list[Any] = []

    for suite_name, report in reports.items():
        sm = (all_suite_metadata or {}).get(suite_name, {})
        # Per-suite metadata overrides the session-level model info.
        suite_model = sm.get("model") or (model.name if model else None)
        suite_provider = sm.get("provider") or (model.provider if model else None)
        suites_data[suite_name] = {
            "kind": "eval",
            "model": suite_model,
            "provider": suite_provider,
            "total_cases": report.total_count,
            "passed": report.passed_count,
            "failed": report.failed_count,
            "errored": report.errored_count,
            "pass_rate": round(report.pass_rate, 4),
            "duration": round(report.duration, 2),
            "cases": {c.case_name: _serialize_case(c) for c in report.cases},
        }
        all_score_stats.extend(report.all_score_stats())

    # NOTE(review): if two suites report a score with the same name, the
    # later suite's stats overwrite the earlier one's here — confirm whether
    # cross-suite score names are expected to be unique.
    scores_summary = {
        s.name: {
            "mean": round(s.mean, 4),
            "median": round(s.median, 4),
            "p5": round(s.p5, 4),
            "p95": round(s.p95, 4),
            "min": round(s.min, 4),
            "max": round(s.max, 4),
            "count": s.count,
        }
        for s in all_score_stats
    }

    return {
        "run_id": str(uuid.uuid4()),
        "timestamp": datetime.now(tz=timezone.utc).isoformat(),
        "git": collect_git_info(),
        "environment": collect_env_info(),
        "metadata": dict(metadata) if metadata else {},
        "evals": {
            # Session-level model info; per-suite overrides live under "suites".
            "model": model.name if model else None,
            "provider": model.provider if model else None,
            "judge": judge,
            "scores_summary": scores_summary,
        },
        "suites": suites_data,
    }


def _serialize_case(case: EvalCaseResult) -> dict[str, Any]:
    """Serialize one case for the history entry, splitting scores by kind."""
    entry: dict[str, Any] = {
        "passed": case.passed,
        "is_error": case.is_error,
        "duration": round(case.duration, 3),
        # Numeric metrics only; bools and strings are stored separately below.
        "scores": {s.name: s.value for s in case.scores if s.is_metric},
        "case_hash": case.case_hash,
        "eval_hash": case.eval_hash,
    }
    # String-valued scores act as labels/reasons.
    labels = {s.name: s.value for s in case.scores if isinstance(s.value, str)}
    if labels:
        entry["labels"] = labels
    # Bool-valued scores are the per-evaluator verdicts.
    assertions = {s.name: s.value for s in case.scores if isinstance(s.value, bool)}
    if assertions:
        entry["assertions"] = assertions
    return entry
class EvalResultsWriter(PluginBase):
    """Writes per-case eval result files as markdown.

    Listens to test pass/fail events, keeps non-eval results out via
    ``_maybe_write``, and writes each eval case into a timestamped
    per-suite run directory.
    """

    name = "eval-results-writer"
    description = "Write eval case result files"

    def __init__(self, history_dir: Path | None = None) -> None:
        # Results live under <history_dir>/results when a dir is given,
        # otherwise under the default .protest/results.
        self._results_base = (
            (history_dir / "results") if history_dir else DEFAULT_RESULTS_DIR
        )
        # One run directory per suite, created lazily on first case.
        self._run_dirs: dict[str, Path] = {}

    @classmethod
    def activate(cls, ctx: PluginContext) -> EvalResultsWriter | None:
        return None  # Wired explicitly by session

    def on_test_pass(self, result: TestResult) -> None:
        self._maybe_write(result, passed=True)

    def on_test_fail(self, result: TestResult) -> None:
        self._maybe_write(result, passed=False)

    def _maybe_write(self, result: TestResult, *, passed: bool) -> None:
        """Write a case file only for eval results carrying a payload."""
        if not result.is_eval or result.eval_payload is None:
            return
        suite_name = result.suite_path.root_name if result.suite_path else "evals"
        case_result = _build_case_result(result, passed)
        self._write_case_file(case_result, suite_name)

    def _write_case_file(self, case_result: EvalCaseResult, suite_name: str) -> None:
        # Lazily create the per-suite run directory on the first case.
        if suite_name not in self._run_dirs:
            self._run_dirs[suite_name] = _make_run_dir(suite_name, self._results_base)
        _write_case_file(case_result, self._run_dirs[suite_name])

    def on_eval_suite_end(self, report: Any) -> None:
        """Print results dir path for the suite."""

        if not isinstance(report, EvalSuiteReport):
            return
        run_dir = self._run_dirs.get(report.suite_name)
        if run_dir:
            print(f" Results: {run_dir}")


def _build_case_result(result: TestResult, passed: bool) -> EvalCaseResult:
    """Build EvalCaseResult from a TestResult with eval_payload."""
    payload = result.eval_payload
    assert payload is not None  # guarded by _maybe_write before dispatch
    return EvalCaseResult(
        case_name=payload.case_name or "",
        node_id=result.node_id,
        scores=tuple(
            EvalScore(
                name=name,
                value=entry.value,
            )
            for name, entry in payload.scores.items()
        ),
        duration=payload.task_duration,
        passed=passed,
        inputs=payload.inputs,
        output=payload.output,
        expected_output=payload.expected_output,
        case_hash=payload.case_hash,
        eval_hash=payload.eval_hash,
    )


# ---------------------------------------------------------------------------
# File writing helpers
# ---------------------------------------------------------------------------


def _make_run_dir(suite_name: str, base_dir: Path | None = None) -> Path:
    """Create and return the timestamped directory for this run."""
    base = base_dir or DEFAULT_RESULTS_DIR
    ts = datetime.now(tz=timezone.utc).strftime("%Y%m%d_%H%M%S")
    # Sanitize the suite name so it is safe as a directory component.
    safe_suite = re.sub(r"[^\w\-]", "_", suite_name)
    run_dir = base / f"{safe_suite}_{ts}"
    run_dir.mkdir(parents=True, exist_ok=True)
    return run_dir


def _write_case_file(case: EvalCaseResult, run_dir: Path) -> None:
    """Write a markdown file for a single eval case."""
    safe_name = re.sub(r"[^\w\-]", "_", case.case_name)
    path = run_dir / f"{safe_name}.md"
    path.write_text(_render_case(case), encoding="utf-8")
def _render_case(case: EvalCaseResult) -> str:
    """Render one eval case as a markdown document."""
    status = "PASS ✓" if case.passed else "FAIL ✗"
    if case.duration < 1:
        duration = f"{case.duration * 1000:.0f}ms"
    else:
        duration = f"{case.duration:.2f}s"

    sections: list[str] = [f"# {case.case_name} — {status} ({duration})", ""]

    for title, value in (
        ("Input", case.inputs),
        ("Output", case.output),
        ("Expected", case.expected_output),
    ):
        sections += [f"## {title}", "", _format_value(value), ""]

    if case.scores:
        sections += ["## Scores", ""]
        sections.extend(_format_score(entry) for entry in case.scores)
        sections.append("")

    return "\n".join(sections)


def _format_score(score: EvalScore) -> str:
    """One markdown bullet per score; metrics get a neutral dot icon."""
    if score.is_metric:
        icon = "·"
    else:
        icon = "✓" if score.passed else "✗"
    return f"- **{score.name}**: {score.value} {icon}"


def _format_value(value: Any) -> str:
    """Markdown-friendly rendering of case inputs/outputs."""
    if value is None:
        return "_none_"
    if isinstance(value, str):
        return value if value.strip() else "_empty string_"
    return f"```\n{value!r}\n```"


class EvalSession(ProTestSession):
    """Session dedicated to evals.

    Usage::

        session = EvalSession(model=ModelInfo(name="qwen-2.5"))

        @session.eval(evaluators=[contains_facts])
        async def chatbot(case: Annotated[dict, From(cases)]) -> str:
            return await ask(case["q"])
    """

    def __init__(
        self,
        *,
        model: ModelInfo | None = None,
        judge: Judge | None = None,
        concurrency: int = 1,
        history: bool = True,
        history_dir: Path | None = None,
        metadata: dict[str, Any] | None = None,
    ) -> None:
        super().__init__(
            concurrency=concurrency,
            history=history,
            history_dir=history_dir,
            metadata=metadata,
        )
        self._eval_model = model
        self._eval_judge_instance: Judge | None = judge
        # Reporters/history only need the judge's identity, not the live object.
        if judge is None:
            self._eval_judge = None
        else:
            self._eval_judge = JudgeInfo(name=judge.name, provider=judge.provider)
+ """ + + output: T + input_tokens: int | None = None + output_tokens: int | None = None + cost: float | None = None + + +@dataclass(frozen=True, slots=True) +class JudgeResponse(Generic[T]): + """Return type for Judge.judge() — wraps the output with optional usage stats. + + Evaluators never see this: ``ctx.judge()`` unwraps and returns ``output``. + ProTest accumulates tokens/cost for history and display. + + Usage:: + + return JudgeResponse( + output=result.output, + input_tokens=usage.request_tokens, + output_tokens=usage.response_tokens, + cost=0.003, + ) + + # Or minimal — tokens/cost are optional: + return JudgeResponse(output=result.output) + """ + + output: T + input_tokens: int | None = None + output_tokens: int | None = None + cost: float | None = None + + +@runtime_checkable +class Judge(Protocol): + """Protocol for LLM judge implementations. + + All configuration (model, temperature, system_prompt, max_tokens) + lives in the constructor of the implementation, NOT in this protocol. + + Usage:: + + class MyJudge: + name = "my-judge" + provider = "openai" + + async def judge(self, prompt: str, output_type: type[T]) -> JudgeResponse[T]: + result = await agent.run(prompt) + return JudgeResponse(output=result.output, input_tokens=100) + + session = EvalSession(judge=MyJudge()) + """ + + name: str + provider: str | None + + async def judge(self, prompt: str, output_type: type[T]) -> JudgeResponse[T]: ... + + +@dataclass(frozen=True, slots=True) +class ModelInfo: + """Metadata about the model being evaluated.""" + + name: str + provider: str | None = None + temperature: float | None = None + extra: dict[str, Any] = field(default_factory=dict) + + +@dataclass(frozen=True, slots=True) +class JudgeInfo: + """Metadata about the LLM judge used for evaluation.""" + + name: str + provider: str | None = None + evaluators: tuple[str, ...] 
= () + extra: dict[str, Any] = field(default_factory=dict) + + +@dataclass(frozen=True, slots=True) +class EvalScore: + """A single named value from an evaluator result. + + Values are categorized by type: + - bool → verdict (pass/fail) + - float → metric (aggregated in stats) + - str → reason (displayed on failure) + """ + + name: str + value: float | bool | str + skipped: bool = False + + @property + def is_verdict(self) -> bool: + return not self.skipped and isinstance(self.value, bool) + + @property + def is_metric(self) -> bool: + return ( + not self.skipped + and isinstance(self.value, (int, float)) + and not isinstance(self.value, bool) + ) + + @property + def is_reason(self) -> bool: + return not self.skipped and isinstance(self.value, str) + + @property + def passed(self) -> bool: + if self.skipped: + return True # skipped scores don't affect pass/fail + if isinstance(self.value, bool): + return self.value + return True + + +@dataclass(frozen=True, slots=True) +class EvalCaseResult: + """Complete result of evaluating a single case.""" + + case_name: str + node_id: str + scores: tuple[EvalScore, ...] 
+ duration: float + passed: bool + inputs: Any = None + output: Any = None + expected_output: Any = None + case_hash: str = "" + eval_hash: str = "" + task_input_tokens: int = 0 + task_output_tokens: int = 0 + task_cost: float = 0.0 + judge_call_count: int = 0 + judge_input_tokens: int = 0 + judge_output_tokens: int = 0 + judge_cost: float = 0.0 + is_error: bool = False + + @property + def numeric_scores(self) -> dict[str, float]: + return {s.name: float(s.value) for s in self.scores if s.is_metric} + + @property + def failed_scores(self) -> tuple[EvalScore, ...]: + return tuple(s for s in self.scores if not s.passed) + + +@dataclass(frozen=True, slots=True) +class ScoreStats: + """Aggregated statistics for a named score across cases.""" + + name: str + mean: float + median: float + p5: float + p95: float + min: float + max: float + count: int + + @classmethod + def from_values(cls, name: str, values: list[float]) -> ScoreStats: + if not values: + return cls(name=name, mean=0, median=0, p5=0, p95=0, min=0, max=0, count=0) + sv = sorted(values) + n = len(sv) + return cls( + name=name, + mean=statistics.mean(sv), + median=statistics.median(sv), + p5=sv[max(0, int(n * 0.05))], + p95=sv[min(n - 1, int(n * 0.95))], + min=sv[0], + max=sv[-1], + count=n, + ) + + +@dataclass(frozen=True, slots=True) +class EvalSuiteReport: + """Aggregated report for a suite of eval cases.""" + + suite_name: str + cases: tuple[EvalCaseResult, ...] 
+ duration: float + + @property + def passed_count(self) -> int: + return sum(1 for c in self.cases if c.passed) + + @property + def failed_count(self) -> int: + return sum(1 for c in self.cases if not c.passed and not c.is_error) + + @property + def errored_count(self) -> int: + return sum(1 for c in self.cases if c.is_error) + + @property + def total_count(self) -> int: + return len(self.cases) + + @property + def pass_rate(self) -> float: + return self.passed_count / self.total_count if self.cases else 0.0 + + def score_names(self) -> set[str]: + return {s.name for c in self.cases for s in c.scores if s.is_metric} + + def score_stats(self, name: str) -> ScoreStats: + values = [ + float(s.value) + for c in self.cases + for s in c.scores + if s.name == name and s.is_metric + ] + return ScoreStats.from_values(name, values) + + def all_score_stats(self) -> list[ScoreStats]: + return [self.score_stats(n) for n in sorted(self.score_names())] + + @property + def total_task_input_tokens(self) -> int: + return sum(c.task_input_tokens for c in self.cases) + + @property + def total_task_output_tokens(self) -> int: + return sum(c.task_output_tokens for c in self.cases) + + @property + def total_task_tokens(self) -> int: + return self.total_task_input_tokens + self.total_task_output_tokens + + @property + def total_task_cost(self) -> float: + return sum(c.task_cost for c in self.cases) + + @property + def total_judge_calls(self) -> int: + return sum(c.judge_call_count for c in self.cases) + + @property + def total_judge_input_tokens(self) -> int: + return sum(c.judge_input_tokens for c in self.cases) + + @property + def total_judge_output_tokens(self) -> int: + return sum(c.judge_output_tokens for c in self.cases) + + @property + def total_judge_tokens(self) -> int: + return self.total_judge_input_tokens + self.total_judge_output_tokens + + @property + def total_judge_cost(self) -> float: + return sum(c.judge_cost for c in self.cases) diff --git a/protest/evals/wrapper.py 
"""Eval wrapper — turns a function into a scored eval test.

The wrapper intercepts the return value, runs evaluators, and returns
an EvalPayload. The rest of the pipeline (executor, outcome builder,
reporters) handles it like any eval test.
"""


def make_eval_wrapper(
    func: Any,
    evaluators: list[Any],
    expected_key: str,
    judge: Any = None,
) -> Any:
    """Wrap *func* to run evaluators on its return value.

    Args:
        func: The user's eval function (sync or async).
        evaluators: Session-level evaluators applied to every case.
        expected_key: Key/attr on the case data holding the expected output.
        judge: Optional Judge exposed to evaluators via ``ctx.judge()``.

    Returns:
        An async wrapper producing an ``EvalPayload``.
    """

    @functools.wraps(func)
    async def eval_wrapper(**kwargs: Any) -> EvalPayload:
        expected = _extract_expected(kwargs, expected_key)
        case_name = _extract_case_name(kwargs, func.__name__)
        inputs = _extract_inputs(kwargs)
        metadata = _extract_metadata(kwargs)

        # Time only the system under test, not the evaluators.
        start = time.perf_counter()
        if asyncio.iscoroutinefunction(func):
            raw_output = await func(**kwargs)
        else:
            raw_output = func(**kwargs)
        task_duration = time.perf_counter() - start

        # Unwrap TaskResult so evaluators see the plain output; keep usage.
        task_input_tokens = 0
        task_output_tokens = 0
        task_cost = 0.0
        if isinstance(raw_output, TaskResult):
            output = raw_output.output
            task_input_tokens = raw_output.input_tokens or 0
            task_output_tokens = raw_output.output_tokens or 0
            task_cost = raw_output.cost or 0.0
        else:
            output = raw_output

        # Session-level evaluators first, then per-case additions.
        all_evaluators = list(evaluators)
        all_evaluators.extend(_extract_per_case_evaluators(kwargs))

        scores, eval_ctx = await run_evaluators(
            all_evaluators,
            case_name,
            inputs,
            output,
            expected,
            metadata,
            task_duration,
            judge=judge,
        )

        return EvalPayload(
            case_name=case_name,
            passed=all(s.passed for s in scores),
            task_duration=task_duration,
            inputs=inputs,
            output=output,
            expected_output=expected,
            scores={
                s.name: EvalScoreEntry(
                    value=s.value,
                    passed=s.passed,
                    skipped=s.skipped,
                )
                for s in scores
            },
            case_hash=compute_case_hash(inputs, expected),
            eval_hash=compute_eval_hash(all_evaluators),
            task_input_tokens=task_input_tokens,
            task_output_tokens=task_output_tokens,
            task_cost=task_cost,
            judge_call_count=eval_ctx.judge_call_count,
            judge_input_tokens=eval_ctx.judge_input_tokens,
            judge_output_tokens=eval_ctx.judge_output_tokens,
            judge_cost=eval_ctx.judge_cost,
        )

    return eval_wrapper


# ---------------------------------------------------------------------------
# Extract helpers — pull data from case_kwargs (dict or dataclass)
# ---------------------------------------------------------------------------


def _get(obj: Any, key: str, default: Any = None) -> Any:
    """Get a value from a dict or dataclass by key/attr name."""
    if isinstance(obj, dict):
        return obj.get(key, default)
    return getattr(obj, key, default)


def _is_case_data(v: Any) -> bool:
    """Check if a value looks like case data (dict or has 'expected'/'q'/'inputs')."""
    if isinstance(v, dict):
        return True
    return hasattr(v, "expected") or hasattr(v, "q") or hasattr(v, "inputs")


def _extract_expected(kwargs: dict[str, Any], key: str) -> Any:
    """Return the expected output from the first case datum that carries one."""
    for v in kwargs.values():
        if _is_case_data(v):
            val = _get(v, key)
            if val is not None:
                return val
    return None


def _extract_case_name(kwargs: dict[str, Any], fallback: str) -> str:
    """Return the case's display name, or *fallback* (the function name)."""
    for v in kwargs.values():
        if _is_case_data(v):
            name = _get(v, "name")
            if name:
                return str(name)
    return fallback


def _extract_inputs(kwargs: dict[str, Any]) -> Any:
    """Pull the case inputs, trying 'inputs', then 'q', then 'input'.

    Checks ``is not None`` instead of truthiness so falsy-but-valid
    inputs like ``""`` or ``0`` are preserved (a plain ``or`` chain
    would silently drop them). Only the first case datum is consulted,
    matching the other extract helpers' first-match behavior.
    """
    for v in kwargs.values():
        if _is_case_data(v):
            for key in ("inputs", "q", "input"):
                val = _get(v, key)
                if val is not None:
                    return val
            return None
    return None


def _extract_metadata(kwargs: dict[str, Any]) -> Any:
    """Return case metadata from the first case datum that carries it."""
    for v in kwargs.values():
        if _is_case_data(v):
            val = _get(v, "metadata")
            if val is not None:
                return val
    return None


def _extract_per_case_evaluators(kwargs: dict[str, Any]) -> list[Any]:
    """Return per-case evaluators declared on the case data, if any."""
    for v in kwargs.values():
        if _is_case_data(v):
            evs = _get(v, "evaluators")
            if evs:
                return list(evs)
    return []


# ---------------------------------------------------------------------------
# Evaluator execution
# ---------------------------------------------------------------------------


def _evaluator_name(ev: Any) -> str:
    """Display name for an evaluator: function ``__name__`` or class name."""
    return getattr(ev, "__name__", type(ev).__name__)


async def _call_evaluator(ev: Any, ctx: EvalContext[Any, Any]) -> list[EvalScore]:
    """Invoke one evaluator (sync or async) and normalize its result to scores.

    Any exception — including one raised while extracting scores from the
    result — is wrapped in FixtureError so evaluator crashes surface as
    infrastructure errors rather than assertion failures.
    """
    name = _evaluator_name(ev)
    try:
        raw = ev(ctx)
        result = await raw if asyncio.iscoroutine(raw) else raw
        return extract_scores_from_result(result, name)
    except Exception as exc:
        raise FixtureError(f"evaluator '{name}'", exc) from exc


async def run_evaluators(
    evaluators: list[Any],
    case_name: str,
    inputs: Any,
    output: Any,
    expected_output: Any,
    metadata: Any,
    duration: float,
    judge: Any = None,
) -> tuple[list[EvalScore], EvalContext[Any, Any]]:
    """Run evaluators and return (scores, ctx carrying judge usage stats)."""
    ctx = EvalContext(
        name=case_name,
        inputs=inputs,
        output=output,
        expected_output=expected_output,
        metadata=metadata,
        duration=duration,
        _judge=judge,
    )

    scores: list[EvalScore] = []
    for ev in evaluators:
        if isinstance(ev, ShortCircuit):
            scores.extend(await _run_short_circuit(ev.evaluators, ctx))
        else:
            scores.extend(await _call_evaluator(ev, ctx))
    return scores, ctx


async def _run_short_circuit(
    evaluators: list[Any],
    ctx: EvalContext[Any, Any],
) -> list[EvalScore]:
    """Run evaluators in order; stop at the first failing verdict.

    Evaluators after the failure are recorded as skipped scores so the
    report still lists them.
    """
    scores: list[EvalScore] = []
    for i, ev in enumerate(evaluators):
        extracted = await _call_evaluator(ev, ctx)
        scores.extend(extracted)
        if any(s.is_verdict and not s.passed for s in extracted):
            # Mark remaining evaluators as skipped.
            for remaining in evaluators[i + 1 :]:
                scores.append(
                    EvalScore(name=_evaluator_name(remaining), value=False, skipped=True)
                )
            break
    return scores
class KindFilterPlugin(PluginBase):
    """Keeps only collected items whose suite kind matches the requested one."""

    name = "kind-filter"
    description = "Filter by suite kind"

    def __init__(self, kind: SuiteKind) -> None:
        self._kind = kind

    @classmethod
    def activate(cls, ctx: PluginContext) -> KindFilterPlugin | None:
        """Activate only when a 'kind_filter' option was supplied."""
        requested = ctx.get("kind_filter")
        if not requested:
            return None
        return cls(kind=SuiteKind(requested))

    def on_collection_finish(self, items: list[TestItem]) -> list[TestItem]:
        """Drop every collected item whose suite kind differs from the filter."""
        return [candidate for candidate in items if self._matches(candidate)]

    def _matches(self, item: TestItem) -> bool:
        # Items without an explicit suite count as plain tests.
        effective_kind = SuiteKind.TEST if item.suite is None else item.suite.kind
        return effective_kind == self._kind
"""Metadata collection: git info, environment, CI detection."""


def collect_git_info() -> dict[str, Any] | None:
    """Collect git context for the current working tree.

    Returns None when git is unavailable, the command fails (e.g. not a
    repo), or the call times out — history collection must never crash
    a test run.
    """
    try:
        commit = _git("rev-parse", "HEAD")
        return {
            "commit": commit,
            "commit_short": commit[:7] if commit else None,
            "branch": _git("rev-parse", "--abbrev-ref", "HEAD"),
            "dirty": bool(_git("status", "--porcelain")),
            "author": _git("log", "-1", "--format=%an"),
            "commit_message": _git("log", "-1", "--format=%s"),
        }
    except (FileNotFoundError, subprocess.CalledProcessError, subprocess.TimeoutExpired):
        # TimeoutExpired: _git runs with timeout=5 — a hung git process
        # must not propagate out of metadata collection.
        return None


def collect_env_info() -> dict[str, Any]:
    """Collect environment metadata (Python, package versions, host, CI)."""
    ci_provider = detect_ci_provider()
    return {
        "python_version": platform.python_version(),
        "protest_version": _get_pkg_version("protest"),
        "pydantic_evals_version": _get_pkg_version("pydantic-evals"),
        "hostname": platform.node(),
        "os": sys.platform,
        "ci": ci_provider is not None,
        "ci_provider": ci_provider,
    }


# Environment variable -> provider name; each provider sets its flag to "true".
_CI_PROVIDERS: dict[str, str] = {
    "GITHUB_ACTIONS": "github-actions",
    "GITLAB_CI": "gitlab-ci",
    "CIRCLECI": "circleci",
    "BUILDKITE": "buildkite",
    "TRAVIS": "travis-ci",
}


def detect_ci_provider() -> str | None:
    """Detect the CI provider from standard environment variables.

    Known providers are checked first; JENKINS_URL identifies Jenkins;
    a bare CI=true reports "unknown"; otherwise None (not CI).
    """
    env = os.environ
    for var, name in _CI_PROVIDERS.items():
        if env.get(var) == "true":
            return name
    if env.get("JENKINS_URL"):
        return "jenkins"
    if env.get("CI") == "true":
        return "unknown"
    return None


def _git(*args: str) -> str:
    """Run one git command and return its stripped stdout.

    Raises FileNotFoundError, CalledProcessError or TimeoutExpired;
    callers are expected to handle all three.
    """
    result = subprocess.run(
        ["git", *args],  # noqa: S607
        capture_output=True,
        text=True,
        timeout=5,
        check=True,
    )
    return result.stdout.strip()


def _get_pkg_version(name: str) -> str | None:
    """Installed version of *name*, or None if it cannot be determined."""
    try:
        from importlib.metadata import version  # noqa: PLC0415 — inside try/except

        return version(name)
    except Exception:
        # Best effort: missing package, broken metadata, anything — report None.
        return None
class HistoryPlugin(PluginBase):
    """Persists test results to JSONL for run-over-run tracking."""

    name = "history"
    description = "Test history tracking"

    def __init__(self, history_dir: Path | None = None) -> None:
        self._history_dir = history_dir or DEFAULT_HISTORY_DIR
        self._history_file = self._history_dir / HISTORY_FILE
        # suite name -> test name -> {"passed": bool, "duration": float}
        self._suites: dict[str, dict[str, dict[str, Any]]] = {}
        self._suite_kinds: dict[str, SuiteKind] = {}
        self._default_suite_name: str = "tests"
        self._history_enabled: bool = False
        self._metadata: dict[str, Any] = {}

    @classmethod
    def activate(cls, ctx: PluginContext) -> HistoryPlugin | None:
        # Not discovered via plugin activation — the session constructs and
        # wires this plugin directly so it can pass history settings.
        return None  # Wired explicitly by session

    def setup(self, session: ProTestSession) -> None:
        """Snapshot session settings and per-suite kinds before the run."""
        self._history_enabled = session.history
        self._metadata = dict(session.metadata)
        for suite in session.suites:
            self._suite_kinds[suite.name] = suite.kind
            # First suite's name becomes the fallback bucket for results
            # without a suite path. NOTE(review): a suite literally named
            # "tests" keeps this condition true, so a later suite would
            # overwrite the default — confirm that is intended.
            if not self._default_suite_name or self._default_suite_name == "tests":
                self._default_suite_name = suite.name

    def on_test_pass(self, result: TestResult) -> None:
        # Eval results are persisted via the eval pipeline, not here.
        if result.is_eval:
            return
        self._record(result, passed=True)

    def on_test_fail(self, result: TestResult) -> None:
        # Eval results are persisted via the eval pipeline, not here.
        if result.is_eval:
            return
        self._record(result, passed=False)

    def on_session_end(self, _result: SessionResult) -> None:
        """Aggregate per-suite stats and append one JSONL history entry."""
        if not self._history_enabled or not self._suites:
            return

        suites_data: dict[str, Any] = {}
        for suite_name, cases in self._suites.items():
            total = len(cases)
            passed = sum(1 for c in cases.values() if c["passed"])
            suites_data[suite_name] = {
                # Fallback "test" covers suites never seen in setup().
                "kind": self._suite_kinds.get(suite_name, "test"),
                "total_cases": total,
                "passed": passed,
                "failed": total - passed,
                "pass_rate": round(passed / total, 4) if total else 0,
                "duration": round(sum(c["duration"] for c in cases.values()), 2),
                "cases": cases,
            }

        entry: dict[str, Any] = {
            "run_id": str(uuid.uuid4()),
            "timestamp": datetime.now(tz=timezone.utc).isoformat(),
            "git": collect_git_info(),
            "environment": collect_env_info(),
            "metadata": self._metadata,
            # Test runs carry no eval block; eval runs write their own entries.
            "evals": None,
            "suites": suites_data,
        }
        append_entry(self._history_file, entry)

    def _record(self, result: TestResult, *, passed: bool) -> None:
        """Store one test outcome under its suite bucket."""
        suite_name = self._get_suite_name(result)
        if suite_name not in self._suites:
            self._suites[suite_name] = {}
        self._suites[suite_name][result.name] = {
            "passed": passed,
            "duration": round(result.duration, 3),
        }

    def _get_suite_name(self, result: TestResult) -> str:
        """Suite bucket for a result: its suite path root, else the default."""
        if result.suite_path:
            return result.suite_path.root_name
        return self._default_suite_name
DEFAULT_HISTORY_DIR = Path(".protest")
HISTORY_FILE = "history.jsonl"


def load_history(
    history_dir: Path | None = None,
    n: int | None = None,
    model: str | None = None,
    suite: str | None = None,
    evals_only: bool = False,
    tests_only: bool = False,
) -> list[dict[str, Any]]:
    """Load history entries with optional filtering.

    Args:
        history_dir: Directory holding the JSONL file (default .protest/).
        n: Keep only the newest *n* entries (after filtering).
        model: Keep only eval entries for this model name.
        suite: Keep only entries containing this suite.
        evals_only / tests_only: Keep entries with at least one suite of
            that kind.

    Returns:
        Entries sorted by timestamp, oldest first. Malformed lines are
        skipped silently — the history file is best-effort data.
    """
    path = (history_dir or DEFAULT_HISTORY_DIR) / HISTORY_FILE
    if not path.exists():
        return []

    entries: list[dict[str, Any]] = []
    # JSONL is always written as UTF-8; read it back explicitly as such.
    for line in path.read_text(encoding="utf-8").strip().splitlines():
        try:
            entry = json.loads(line)
        except json.JSONDecodeError:
            continue
        if evals_only and not _has_suite_kind(entry, "eval"):
            continue
        if tests_only and not _has_suite_kind(entry, "test"):
            continue
        if model and (entry.get("evals") or {}).get("model") != model:
            continue
        if suite and suite not in entry.get("suites", {}):
            continue
        entries.append(entry)

    # ISO-8601 timestamps sort correctly as strings.
    entries.sort(key=lambda e: e.get("timestamp", ""))
    if n is not None:
        entries = entries[-n:]
    return entries


def _has_suite_kind(entry: dict[str, Any], kind: str) -> bool:
    """Check if entry has at least one suite with the given kind."""
    suites = entry.get("suites", {})
    for suite_data in suites.values():
        if isinstance(suite_data, dict) and suite_data.get("kind") == kind:
            return True
    # Legacy fallback: entries without a kind field are classified by
    # whether they carry an "evals" block.
    if not any(isinstance(s, dict) and "kind" in s for s in suites.values()):
        if kind == "eval":
            return entry.get("evals") is not None
        if kind == "test":
            return entry.get("evals") is None
    return False


def append_entry(path: Path, entry: dict[str, Any]) -> None:
    """Append a single JSON entry to a JSONL file.

    Written as UTF-8 so entries round-trip regardless of platform locale.

    Note: no file locking — concurrent writes from separate processes
    could corrupt the file. In practice, protest runs are single-process
    (async workers share the same process). If concurrent CI jobs write
    to the same history file, consider using separate history_dir per job.
    """
    path.parent.mkdir(parents=True, exist_ok=True)
    with path.open("a", encoding="utf-8") as f:
        f.write(json.dumps(entry, default=str) + "\n")


def load_previous_run(
    history_dir: Path | None = None,
    evals_only: bool = False,
) -> dict[str, Any] | None:
    """Load the most recent history entry (optionally the latest eval run)."""
    path = (history_dir or DEFAULT_HISTORY_DIR) / HISTORY_FILE
    if not path.exists():
        return None
    lines = path.read_text(encoding="utf-8").strip().splitlines()
    for line in reversed(lines):
        try:
            entry = json.loads(line)
        except json.JSONDecodeError:
            continue
        if evals_only and entry.get("evals") is None:
            continue
        return dict(entry)
    return None


def clean_dirty(history_dir: Path | None = None) -> int:
    """Remove entries where git.dirty=True AND git.commit matches current HEAD.

    Returns the number of entries removed. Returns 0 when there is no
    history file, git is unavailable, the command fails, or it times out.
    """
    path = (history_dir or DEFAULT_HISTORY_DIR) / HISTORY_FILE
    if not path.exists():
        return 0

    try:
        current_commit = subprocess.run(
            ["git", "rev-parse", "HEAD"],  # noqa: S607
            capture_output=True,
            text=True,
            timeout=5,
            check=True,
        ).stdout.strip()
    except (FileNotFoundError, subprocess.CalledProcessError, subprocess.TimeoutExpired):
        # TimeoutExpired must be caught too — timeout=5 is passed above.
        return 0

    lines = path.read_text(encoding="utf-8").strip().splitlines()
    kept: list[str] = []
    removed = 0

    for line in lines:
        try:
            entry = json.loads(line)
        except json.JSONDecodeError:
            # Preserve malformed lines untouched — only drop known-dirty entries.
            kept.append(line)
            continue
        git = entry.get("git") or {}
        if git.get("dirty") and git.get("commit") == current_commit:
            removed += 1
        else:
            kept.append(line)

    if removed:
        path.write_text(("\n".join(kept) + "\n") if kept else "", encoding="utf-8")
    return removed
# Counts at or above this threshold are rendered in 'k' units.
_TOKEN_K_THRESHOLD = 1000


def _format_tokens(tokens: int) -> str:
    """Render a token count compactly: 45 -> '45', 1234 -> '1.2k'."""
    if tokens >= _TOKEN_K_THRESHOLD:
        return f"{tokens / _TOKEN_K_THRESHOLD:.1f}k"
    return str(tokens)


def _format_usage(input_tokens: int, output_tokens: int, cost: float) -> str:
    """Render usage as 'Xk in / Yk out, $0.0042'; empty when nothing to show."""
    pieces: list[str] = []
    if input_tokens > 0 or output_tokens > 0:
        pieces.append(
            f"{_format_tokens(input_tokens)} in / {_format_tokens(output_tokens)} out"
        )
    if cost > 0:
        pieces.append(f"${cost:.4f}")
    return ", ".join(pieces)
    def on_test_setup_done(self, info: TestStartInfo) -> None:
        """Announce per-test fixture setup completion (FIXTURES verbosity).

        Uses _print_bypass because this fires while the test's stdout is
        being captured — a plain print() would be swallowed.
        """
        if self._verbosity >= Verbosity.FIXTURES:
            self._print_bypass(f" > {info.name} setup done")

    def on_test_teardown_start(self, info: TestTeardownInfo) -> None:
        """Announce per-test teardown start (FIXTURES verbosity)."""
        if self._verbosity >= Verbosity.FIXTURES:
            self._print_bypass(f" < {info.name} teardown...")

    @staticmethod
    def _print_bypass(msg: str) -> None:
        """Write directly to the real stdout, sidestepping test capture.

        The capture layer keeps the real stream on ``_original``; fall back
        to sys.stdout when capture is not installed.
        """
        stream = getattr(sys.stdout, "_original", sys.stdout)
        stream.write(msg + "\n")
        stream.flush()

    def on_user_print(self, data: Any) -> None:
        """Handle protest.console.print(): write to the real stdout.

        ``data`` is a (message, raw) pair; Rich markup tags are stripped
        for this ASCII reporter unless raw mode was requested.
        """
        msg, raw = data
        text = msg if raw else strip_markup(msg)
        stream = getattr(sys.stdout, "_original", sys.stdout)
        stream.write(f" | {text}\n")
        stream.flush()

    def on_eval_suite_end(self, report: Any) -> None:
        """Print the aggregated eval summary: score stats, pass rate, usage."""
        # The hook is typed Any; ignore anything that is not a real report.
        if not isinstance(report, EvalSuiteReport):
            return
        stats = report.all_score_stats()
        print()
        print(f" Eval: {report.suite_name} ({report.total_count} cases)")
        if stats:
            # Pad names so the mean/percentile columns line up.
            max_name = max(len(s.name) for s in stats)
            print(" " + "─" * 60)
            for s in stats:
                print(
                    f" {s.name:<{max_name}} "
                    f"mean={s.mean:.2f} p50={s.median:.2f} "
                    f"p5={s.p5:.2f} p95={s.p95:.2f}"
                )
            print(" " + "─" * 60)
        rate_pct = report.pass_rate * 100
        print(f" Passed: {report.passed_count}/{report.total_count} ({rate_pct:.1f}%)")
        # Task usage line only when the run actually reported tokens/cost.
        if report.total_task_tokens > 0 or report.total_task_cost > 0:
            print(
                f" Task: {_format_usage(report.total_task_input_tokens, report.total_task_output_tokens, report.total_task_cost)}"
            )
        if report.total_judge_calls > 0:
            judge_parts = [f"{report.total_judge_calls} calls"]
            usage = _format_usage(
                report.total_judge_input_tokens,
                report.total_judge_output_tokens,
                report.total_judge_cost,
            )
            if usage:
                judge_parts.append(usage)
            print(f" Judge: {', '.join(judge_parts)}")
        print()
# Counts at or above this threshold are rendered in 'k' units.
_TOKEN_K_THRESHOLD = 1000


def _format_tokens(tokens: int) -> str:
    """Format token count: 1234 → '1.2k', 45 → '45'."""
    if tokens < _TOKEN_K_THRESHOLD:
        return str(tokens)
    return f"{tokens / _TOKEN_K_THRESHOLD:.1f}k"


def _format_usage(input_tokens: int, output_tokens: int, cost: float) -> str:
    """Format usage stats as 'Xk in / Yk out, $0.0042'."""
    segments: list[str] = []
    if input_tokens > 0 or output_tokens > 0:
        segments.append(
            f"{_format_tokens(input_tokens)} in / {_format_tokens(output_tokens)} out"
        )
    if cost > 0:
        segments.append(f"${cost:.4f}")
    return ", ".join(segments)


def _format_eval_scores_inline(result: TestResult) -> str:
    """Format eval scores for inline display (e.g. ' bg_score=0.8 char_id=1.0')."""
    payload = result.eval_payload
    if not payload:
        return ""
    rendered: list[str] = []
    for score_name, entry in payload.scores.items():
        if entry.skipped:
            rendered.append(f"{score_name}=⊘")
        elif isinstance(entry.value, bool):
            # bool must be tested before float — bool is an int subclass.
            rendered.append(f"{score_name}={'✓' if entry.value else '✗'}")
        elif isinstance(entry.value, float):
            rendered.append(f"{score_name}={entry.value:.2f}")
        else:
            rendered.append(f"{score_name}={entry.value}")
    if not rendered:
        return ""
    return f" [dim]{' '.join(rendered)}[/]"
if p.inputs is not None: + inp = str(p.inputs)[:200] + self._print(f"[dim] │ inputs: {inp}[/]") + if p.output is not None: + out = str(p.output)[:200] + self._print(f"[dim] │ output: {out}[/]") + if p.expected_output is not None: + exp = str(p.expected_output)[:200] + self._print(f"[dim] │ expected: {exp}[/]") + + def _maybe_show_logs(self, result: TestResult) -> None: + """Show captured log records if --show-logs is active.""" + if not self._show_logs or not result.log_records: + return + min_level = getattr(logging, self._show_logs.upper(), logging.INFO) + for record in result.log_records: + if record.levelno >= min_level: + level = record.levelname + color = ( + "red" + if record.levelno >= logging.ERROR + else "yellow" + if record.levelno >= logging.WARNING + else "dim" + ) + self._print( + f"[{color}] LOG [{level}] {record.name}: {record.getMessage()}[/]" + ) + + def _print_bypass(self, message: str) -> None: + """Print bypassing capture (for lifecycle messages emitted during tests).""" + stream = getattr(sys.stdout, "_original", sys.stdout) + Console(file=stream, highlight=False).print(message) + def on_collection_finish(self, items: list[TestItem]) -> list[TestItem]: return items @@ -128,7 +246,7 @@ def on_fixture_setup_start(self, info: FixtureInfo) -> None: self._print(f"[dim] ↳ fixture '{info.name}' setup... 
{scope_str}[/]") def on_fixture_setup_done(self, info: FixtureInfo) -> None: - if self._verbosity >= Verbosity.FIXTURES: + if self._verbosity >= Verbosity.NORMAL: self._print( f"[dim] ↳ fixture '{info.name}' ready ({_format_duration(info.duration)})[/]" ) @@ -145,11 +263,13 @@ def on_fixture_teardown_done(self, info: FixtureInfo) -> None: def on_test_setup_done(self, info: TestStartInfo) -> None: if self._verbosity >= Verbosity.FIXTURES: - self._print(f"[dim] → {info.name} setup done[/]") + label = _short_label(info.name, info.node_id).replace("[", "\\[") + self._print_bypass(f"[dim] → {label} setup done[/]") def on_test_teardown_start(self, info: TestTeardownInfo) -> None: if self._verbosity >= Verbosity.FIXTURES: - self._print(f"[dim] ← {info.name} teardown...[/]") + label = _short_label(info.name, info.node_id).replace("[", "\\[") + self._print_bypass(f"[dim] ← {label} teardown...[/]") def on_test_retry(self, info: TestRetryInfo) -> None: delay_msg = f", retrying in {info.delay}s" if info.delay > 0 else "" @@ -169,7 +289,13 @@ def on_test_pass(self, result: TestResult) -> None: retry_suffix = ( f" [dim]\\[attempt {result.attempt}/{result.max_attempts}][/]" ) - self._print(f" [green]✓[/] {name} [dim]({duration})[/]{retry_suffix}") + scores_str = _format_eval_scores_inline(result) if result.is_eval else "" + self._print( + f" [green]✓[/] {name} [dim]({duration})[/]{scores_str}{retry_suffix}" + ) + if self._show_output and result.is_eval: + self._print_eval_detail(result) + self._maybe_show_logs(result) def on_test_fail(self, result: TestResult) -> None: name = _format_test_name(result) @@ -197,8 +323,17 @@ def on_test_fail(self, result: TestResult) -> None: self._print(f" [red]✗[/] {name}: {result.error}{retry_suffix}") if result.output: - for line in result.output.rstrip().splitlines(): + lines = result.output.rstrip().splitlines() + max_lines = 20 + for line in lines[:max_lines]: self._print(f"[dim] │ {line}[/]") + if len(lines) > max_lines: + self._print( + 
f"[dim] │ ... ({len(lines) - max_lines} more lines in .protest/last_run_stdout)[/]" + ) + if result.is_eval: + self._print_eval_detail(result) # always show on fail + self._maybe_show_logs(result) def on_test_skip(self, result: TestResult) -> None: self._skipped += 1 @@ -249,14 +384,16 @@ def _format_traceback(self, error: Exception) -> str: return "".join(lines) def _print_failure_summary(self) -> None: - if self._failed_results: + non_eval_failures = [r for r in self._failed_results if not r.is_eval] + if non_eval_failures: self._print("\n[bold red]═══ FAILURES ═══[/]") - for result in self._failed_results: + for result in non_eval_failures: self._print_failure_detail(result, is_error=False) - if self._error_results: + non_eval_errors = [r for r in self._error_results if not r.is_eval] + if non_eval_errors: self._print("\n[bold yellow]═══ ERRORS ═══[/]") - for result in self._error_results: + for result in non_eval_errors: self._print_failure_detail(result, is_error=True) def _print_failure_detail(self, result: TestResult, *, is_error: bool) -> None: @@ -281,8 +418,78 @@ def _print_failure_detail(self, result: TestResult, *, is_error: bool) -> None: escaped_line = line.replace("[", "\\[") self._print(f"[dim]{escaped_line}[/]") + def on_user_print(self, data: Any) -> None: + msg, raw = data + # Write to the real stdout, bypassing capture + stream = getattr(sys.stdout, "_original", sys.stdout) + c = Console(file=stream, highlight=False) + if raw: + c.print(msg, markup=False) + else: + c.print(f"[dim] │[/] {msg}") + + def on_eval_suite_end(self, report: Any) -> None: + if not isinstance(report, EvalSuiteReport): + return + stats = report.all_score_stats() + self._print("") + if stats: + table = Table( + title=f"Eval: {report.suite_name} ({report.total_count} cases)", + show_header=True, + header_style="bold cyan", + padding=(0, 1), + ) + table.add_column("Score", style="cyan", no_wrap=True) + table.add_column("mean", justify="right") + table.add_column("p50", 
justify="right") + table.add_column("p5", justify="right", style="dim") + table.add_column("p95", justify="right", style="dim") + for s in stats: + table.add_row( + s.name, + f"{s.mean:.2f}", + f"{s.median:.2f}", + f"{s.p5:.2f}", + f"{s.p95:.2f}", + ) + self.console.print(table) + else: + self._print( + f" [cyan]Eval: {report.suite_name} ({report.total_count} cases)[/]" + ) + full_pass = 100 + half_pass = 50 + rate_pct = report.pass_rate * full_pass + color = ( + "green" + if rate_pct >= full_pass + else "yellow" + if rate_pct >= half_pass + else "red" + ) + self._print( + f" [{color}]Passed: {report.passed_count}/{report.total_count} ({rate_pct:.1f}%)[/]" + ) + if report.total_task_tokens > 0 or report.total_task_cost > 0: + self._print( + f" [dim]Task: {_format_usage(report.total_task_input_tokens, report.total_task_output_tokens, report.total_task_cost)}[/]" + ) + if report.total_judge_calls > 0: + judge_parts = [f"{report.total_judge_calls} calls"] + usage = _format_usage( + report.total_judge_input_tokens, + report.total_judge_output_tokens, + report.total_judge_cost, + ) + if usage: + judge_parts.append(usage) + self._print(f" [dim]Judge: {', '.join(judge_parts)}[/]") + def on_session_complete(self, result: SessionResult) -> None: - if self._failed_results or self._error_results: + has_non_eval_failures = any(not r.is_eval for r in self._failed_results) + has_non_eval_errors = any(not r.is_eval for r in self._error_results) + if has_non_eval_failures or has_non_eval_errors: self._print_failure_summary() total = ( diff --git a/protest/reporting/web.py b/protest/reporting/web.py index 2e47b5d..517de24 100644 --- a/protest/reporting/web.py +++ b/protest/reporting/web.py @@ -30,12 +30,12 @@ ) try: - from websockets.asyncio.server import ( # type: ignore[import-not-found] + from websockets.asyncio.server import ( serve as ws_serve, ) - from websockets.datastructures import Headers # type: ignore[import-not-found] - from websockets.http11 import Request, Response # 
type: ignore[import-not-found] - from websockets.sync.client import ( # type: ignore[import-not-found] + from websockets.datastructures import Headers + from websockets.http11 import Request, Response + from websockets.sync.client import ( connect as ws_connect, ) except ImportError as err: # pragma: no cover diff --git a/pyproject.toml b/pyproject.toml index 0608d8d..0dbe858 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -49,6 +49,9 @@ rich = [ web = [ "websockets>=12.0", ] +evals = [ + "pydantic-evals>=0.1", +] [tool.ruff] @@ -93,13 +96,25 @@ ignore = [ ] "protest/cli/**" = [ "T201", # print allowed in CLI - "PLC0415", # lazy imports for fast --help "PLR2004", # magic values for arg parsing ] "protest/core/session.py" = [ - "PLC0415", # lazy import for optional rich dependency "PLR0913", # many args is deliberate API design ] +"protest/core/execution/test_executor.py" = [ + "PLR0915", # _run_test is inherently complex (retry loop + eval capture) +] +"protest/history/**" = [ + "S603", # subprocess git calls are safe + "PLR0913", # load_history has many filter params by design +] +"protest/cli/history.py" = [ + "T201", # print for CLI output +] +"protest/evals/**" = [ + "T201", # print for eval reporting + "PLR0913", # adapter functions have many params by design +] "protest/reporting/ascii.py" = [ "T201", # print is the purpose of this module ] @@ -120,6 +135,9 @@ omit = [ "protest/compat.py", # Version-specific imports, impossible to cover without multi-version CI ] +[tool.mypy] +strict = true + [tool.pytest.ini_options] testpaths = ["tests"] asyncio_mode = "strict" @@ -152,6 +170,7 @@ include = ["protest*"] dev = [ "jsonschema>=4.0.0", "mkdocs-material>=9.7.0", + "mypy>=1.0", "pre-commit>=4.5.0", "pytest>=9.0.1", "pytest-asyncio>=1.3.0", diff --git a/tests/core/test_collector.py b/tests/core/test_collector.py index 6b02ad7..9ba8719 100644 --- a/tests/core/test_collector.py +++ b/tests/core/test_collector.py @@ -88,7 +88,7 @@ def 
test_collect_suite_tests(self) -> None: """Collects tests from suites.""" session = ProTestSession() suite = ProTestSuite("my_suite") - session.include_suite(suite) + session.add_suite(suite) @suite.test() def suite_test() -> None: @@ -107,7 +107,7 @@ def test_collect_mixed_tests(self) -> None: """Collects both standalone and suite tests.""" session = ProTestSession() suite = ProTestSuite("my_suite") - session.include_suite(suite) + session.add_suite(suite) @session.test() def standalone_test() -> None: @@ -129,7 +129,7 @@ def test_collect_generates_correct_node_ids(self) -> None: """Collected items have correct node_ids.""" session = ProTestSession() suite = ProTestSuite("MySuite") - session.include_suite(suite) + session.add_suite(suite) @session.test() def standalone() -> None: diff --git a/tests/core/test_parametrize.py b/tests/core/test_parametrize.py index ec567db..df8a9ac 100644 --- a/tests/core/test_parametrize.py +++ b/tests/core/test_parametrize.py @@ -190,7 +190,7 @@ def test_triple( def test_structured_data_for_reporters(self) -> None: session = ProTestSession() suite = ProTestSuite("API") - session.include_suite(suite) + session.add_suite(suite) users = ForEach(["alice"], ids=lambda u: u) diff --git a/tests/core/test_skip.py b/tests/core/test_skip.py index 437e47d..71cddb1 100644 --- a/tests/core/test_skip.py +++ b/tests/core/test_skip.py @@ -54,7 +54,7 @@ def test_normal() -> None: def test_suite_skip_decorator(self) -> None: session = ProTestSession() suite = ProTestSuite("test") - session.include_suite(suite) + session.add_suite(suite) @suite.test(skip="Suite test skipped") def test_skipped() -> None: diff --git a/tests/core/test_skipif.py b/tests/core/test_skipif.py index 65fe632..4e24388 100644 --- a/tests/core/test_skipif.py +++ b/tests/core/test_skipif.py @@ -74,7 +74,7 @@ def test_skipped() -> None: def test_suite_skip_with_callable(self) -> None: session = ProTestSession() suite = ProTestSuite("test") - session.include_suite(suite) + 
session.add_suite(suite) @suite.test(skip=lambda: True, skip_reason="Suite conditional skip") def test_skipped() -> None: diff --git a/tests/core/test_xfail.py b/tests/core/test_xfail.py index 8451e23..4cf1d0a 100644 --- a/tests/core/test_xfail.py +++ b/tests/core/test_xfail.py @@ -57,7 +57,7 @@ def test_normal() -> None: def test_suite_xfail_decorator(self) -> None: session = ProTestSession() suite = ProTestSuite("test") - session.include_suite(suite) + session.add_suite(suite) @suite.test(xfail="Suite test xfailed") def test_xfailed() -> None: diff --git a/tests/evals/test_e2e.py b/tests/evals/test_e2e.py new file mode 100644 index 0000000..72ef8ff --- /dev/null +++ b/tests/evals/test_e2e.py @@ -0,0 +1,1076 @@ +"""End-to-end tests for ProTest evals integration. + +These tests define the PUBLIC API contract. They test what the user sees: +- Session setup (EvalSession, @session.eval with ForEach/From) +- CLI behavior (protest run vs protest eval) +- Output format (scores table, trends, failure messages) +- History (JSONL format, stats, significance, clean-dirty) +- Built-in evaluators + +Implementation can change freely as long as these tests pass. 
+""" + +from __future__ import annotations + +import json +import subprocess +from dataclasses import dataclass +from pathlib import Path # noqa: TC003 — used at runtime (pytest tmp_path) +from typing import Annotated, Any + +from protest import ForEach, From, ProTestSession, Use, fixture +from protest.api import run_session +from protest.core.collector import Collector +from protest.core.runner import TestRunner +from protest.core.suite import ProTestSuite +from protest.entities import SuiteKind +from protest.evals import ( + EvalContext, + EvalSession, + Metric, + ModelInfo, + ShortCircuit, + Verdict, + evaluator, +) +from protest.evals.evaluators import ( + contains_expected, + contains_keywords, + does_not_contain, + json_valid, + matches_regex, + max_length, + min_length, + not_empty, + word_overlap, +) +from protest.evals.hashing import compute_case_hash, compute_eval_hash +from protest.evals.results_writer import EvalResultsWriter +from protest.evals.types import EvalSuiteReport # noqa: TC001 — used at runtime +from protest.filters.kind import KindFilterPlugin +from protest.history.storage import append_entry, clean_dirty +from protest.plugin import PluginBase, PluginContext + +# --------------------------------------------------------------------------- +# Fixtures: deterministic evaluators + task +# --------------------------------------------------------------------------- + + +@dataclass(frozen=True, slots=True) +class FakeAccuracyResult: + """Structured result for fake accuracy evaluator.""" + + accuracy: Annotated[float, Metric] + matches_expected: Annotated[bool, Verdict] + + +@evaluator +def fake_accuracy(ctx: EvalContext) -> FakeAccuracyResult: + if ctx.expected_output and ctx.expected_output.lower() in ctx.output.lower(): + return FakeAccuracyResult(accuracy=1.0, matches_expected=True) + return FakeAccuracyResult(accuracy=0.0, matches_expected=False) + + +@evaluator +async def async_fake_accuracy(ctx: EvalContext) -> FakeAccuracyResult: + """Async 
evaluator — simulates LLMJudge which calls an async LLM API.""" + # Simulate async I/O (e.g. LLM call) without actually blocking + if ctx.expected_output and ctx.expected_output.lower() in ctx.output.lower(): + return FakeAccuracyResult(accuracy=1.0, matches_expected=True) + return FakeAccuracyResult(accuracy=0.0, matches_expected=False) + + +def echo_task(text: str) -> str: + return f"Echo: {text}" + + +async def async_echo_task(text: str) -> str: + return f"Async: {text}" + + +basic_cases = ForEach( + [ + {"inputs": "hello world", "expected": "hello", "name": "case_pass"}, + {"inputs": "xyz", "expected": "notfound", "name": "case_fail"}, + ], + ids=lambda c: c["name"], +) + + +# --------------------------------------------------------------------------- +# Session setup +# --------------------------------------------------------------------------- + + +class TestEvalSession: + """EvalSession setup: constructor with model=, @session.eval.""" + + def test_add_eval_creates_eval_kind(self) -> None: + session = EvalSession() + + @session.eval(evaluators=[fake_accuracy]) + def eval_echo(case: Annotated[dict, From(basic_cases)]) -> str: + return echo_task(case["inputs"]) + + # The session should have a suite with kind=eval + assert len(session._suites) > 0 + assert any(s.kind == "eval" for s in session._suites) + + def test_model_set_via_constructor(self) -> None: + session = EvalSession(model=ModelInfo(name="test-model")) + assert session._eval_model is not None + assert session._eval_model.name == "test-model" + + def test_metadata_on_constructor(self) -> None: + session = EvalSession(metadata={"env": "test"}) + assert session.metadata["env"] == "test" + + def test_eval_with_bool_verdict(self) -> None: + """Evaluator with bool field: case_fail has matches_expected=False -> fail.""" + session = EvalSession() + + @session.eval(evaluators=[fake_accuracy]) + def eval_echo(case: Annotated[dict, From(basic_cases)]) -> str: + return echo_task(case["inputs"]) + + runner = 
TestRunner(session) + result = runner.run() + # case_pass returns matches_expected=True -> pass + # case_fail returns matches_expected=False -> fail + assert result.success is False + + def test_async_task_works(self) -> None: + session = EvalSession() + + @session.eval(evaluators=[fake_accuracy]) + async def eval_echo(case: Annotated[dict, From(basic_cases)]) -> str: + return await async_echo_task(case["inputs"]) + + runner = TestRunner(session) + runner.run() + + def test_async_evaluator_does_not_crash(self) -> None: + """Regression: async evaluator called via evaluate_sync raised 'event loop already running'.""" + single_case = ForEach( + [ + {"inputs": "hello world", "expected": "hello", "name": "c1"}, + ], + ids=lambda c: c["name"], + ) + + session = EvalSession() + + @session.eval(evaluators=[async_fake_accuracy]) + def eval_echo(case: Annotated[dict, From(single_case)]) -> str: + return echo_task(case["inputs"]) + + runner = TestRunner(session) + result = runner.run() + assert result.success is True + + +# --------------------------------------------------------------------------- +# Kind filtering (protest run vs protest eval) +# --------------------------------------------------------------------------- + + +class TestKindFiltering: + """Suites have kind, filtering works.""" + + def test_test_suite_has_kind_test(self) -> None: + suite = ProTestSuite("my_tests") + assert suite.kind == "test" + + def test_eval_suite_has_kind_eval(self) -> None: + session = EvalSession() + + @session.eval(evaluators=[fake_accuracy]) + def eval_echo(case: Annotated[dict, From(basic_cases)]) -> str: + return echo_task(case["inputs"]) + + assert any(s.kind == "eval" for s in session._suites) + + def test_kind_filter_keeps_only_matching(self) -> None: + test_suite = ProTestSuite("tests") + eval_suite = ProTestSuite("evals", kind=SuiteKind.EVAL) + + session = ProTestSession() + + @test_suite.test() + def test_one() -> None: + pass + + @eval_suite.test(is_eval=True) + def 
eval_one() -> None: + pass + + session.add_suite(test_suite) + session.add_suite(eval_suite) + + items = Collector().collect(session) + assert len(items) == 2 + + # Filter to eval only + plugin = KindFilterPlugin(kind=SuiteKind.EVAL) + filtered = plugin.on_collection_finish(items) + assert len(filtered) == 1 + assert filtered[0].suite.kind == "eval" + + def test_unified_session_runs_tests_only(self) -> None: + """protest run behavior: only kind=test suites.""" + session = ProTestSession() + + test_suite = ProTestSuite("unit") + results: list[str] = [] + + @test_suite.test() + def test_a() -> None: + results.append("test") + + session.add_suite(test_suite) + + @session.eval(evaluators=[fake_accuracy]) + def eval_echo(case: Annotated[dict, From(basic_cases)]) -> str: + return echo_task(case["inputs"]) + + ctx = PluginContext(args={"kind_filter": "test"}) + run_session(session, ctx=ctx) + + assert "test" in results + + def test_unified_session_runs_evals_only(self) -> None: + """protest eval behavior: only kind=eval suites.""" + session = ProTestSession() + + test_suite = ProTestSuite("unit") + test_ran = [] + + @test_suite.test() + def test_a() -> None: + test_ran.append(True) + + session.add_suite(test_suite) + + @session.eval(evaluators=[fake_accuracy]) + def eval_echo(case: Annotated[dict, From(basic_cases)]) -> str: + return echo_task(case["inputs"]) + + ctx = PluginContext(args={"kind_filter": "eval"}) + run_session(session, ctx=ctx) + + assert len(test_ran) == 0 # test suite was filtered out + + +# --------------------------------------------------------------------------- +# Output format +# --------------------------------------------------------------------------- + + +class TestEvalOutput: + """What the user sees in the terminal. + + These tests verify output by reading the EvalPlugin report directly, + since ProTest captures stdout during test runs. 
+ """ + + def test_report_contains_score_stats(self) -> None: + reports: list[EvalSuiteReport] = [] + + class ReportCapture(PluginBase): + name = "report-capture" + description = "Captures eval reports" + + def on_eval_suite_end(self, report: Any) -> None: + reports.append(report) + + session = EvalSession() + session.register_plugin(ReportCapture()) + + @session.eval(evaluators=[fake_accuracy]) + def eval_echo(case: Annotated[dict, From(basic_cases)]) -> str: + return echo_task(case["inputs"]) + + runner = TestRunner(session) + runner.run() + + assert len(reports) == 1 + stats = reports[0].all_score_stats() + assert len(stats) > 0 + assert any(s.name == "accuracy" for s in stats) + + def test_report_has_pass_count(self) -> None: + reports: list[EvalSuiteReport] = [] + + class ReportCapture(PluginBase): + name = "report-capture" + description = "Captures eval reports" + + def on_eval_suite_end(self, report: Any) -> None: + reports.append(report) + + session = EvalSession() + session.register_plugin(ReportCapture()) + + @session.eval(evaluators=[fake_accuracy]) + def eval_echo(case: Annotated[dict, From(basic_cases)]) -> str: + return echo_task(case["inputs"]) + + runner = TestRunner(session) + runner.run() + + assert len(reports) == 1 + assert reports[0].total_count == 2 + + def test_failed_eval_has_error_with_score_details(self) -> None: + """When an eval case fails, the error message includes score details.""" + errors: list[Any] = [] + + class ErrorCollector(PluginBase): + name = "error-collector" + + def on_test_fail(self, result: Any) -> None: + if result.error: + errors.append(str(result.error)) + + session = EvalSession() + session.register_plugin(ErrorCollector()) + + @session.eval(evaluators=[fake_accuracy]) + def eval_echo(case: Annotated[dict, From(basic_cases)]) -> str: + return echo_task(case["inputs"]) + + run_session(session) + + # case_fail has matches_expected=False + assert any("matches_expected=" in e for e in errors) + + +# 
--------------------------------------------------------------------------- +# EvalPayload flow +# --------------------------------------------------------------------------- + + +class TestEvalPayloadFlow: + """EvalPayload flows through the framework correctly.""" + + def test_test_result_has_eval_payload(self) -> None: + collected: list[Any] = [] + + class Collector(PluginBase): + name = "collector" + + def on_test_pass(self, result: Any) -> None: + collected.append(result) + + def on_test_fail(self, result: Any) -> None: + collected.append(result) + + session = EvalSession() + session.register_plugin(Collector()) + + @session.eval(evaluators=[fake_accuracy]) + def eval_echo(case: Annotated[dict, From(basic_cases)]) -> str: + return echo_task(case["inputs"]) + + runner = TestRunner(session) + runner.run() + + assert len(collected) == 2 + for result in collected: + assert result.is_eval is True + assert result.eval_payload is not None + assert result.eval_payload.case_name in ("case_pass", "case_fail") + assert "accuracy" in result.eval_payload.scores + assert "matches_expected" in result.eval_payload.scores + + def test_lifecycle_events_have_case_id_in_node_id(self) -> None: + """setup_done/teardown_start events carry node_id with [case_id].""" + setup_ids: list[str] = [] + teardown_ids: list[str] = [] + + class LifecycleCollector(PluginBase): + name = "lifecycle-collector" + + def on_test_setup_done(self, info: Any) -> None: + setup_ids.append(info.node_id) + + def on_test_teardown_start(self, info: Any) -> None: + teardown_ids.append(info.node_id) + + session = EvalSession() + session.register_plugin(LifecycleCollector()) + + @session.eval(evaluators=[fake_accuracy]) + def eval_echo(case: Annotated[dict, From(basic_cases)]) -> str: + return echo_task(case["inputs"]) + + runner = TestRunner(session) + runner.run() + + assert len(setup_ids) == 2 + for node_id in setup_ids: + assert "[" in node_id, f"node_id missing case id: {node_id}" + for node_id in 
teardown_ids: + assert "[" in node_id, f"node_id missing case id: {node_id}" + + def test_evaluator_exception_is_error_not_fail(self) -> None: + """An evaluator that raises is treated as error (infra), not test fail.""" + results: list[Any] = [] + + class Collector(PluginBase): + name = "collector" + + def on_test_fail(self, result: Any) -> None: + results.append(result) + + @evaluator + def crashing_evaluator(ctx: EvalContext) -> bool: + raise RuntimeError("LLM judge timeout") + + single_case = ForEach( + [ + {"inputs": "hello", "expected": "hello", "name": "c1"}, + ], + ids=lambda c: c["name"], + ) + + session = EvalSession() + session.register_plugin(Collector()) + + @session.eval(evaluators=[crashing_evaluator]) + def eval_echo(case: Annotated[dict, From(single_case)]) -> str: + return echo_task(case["inputs"]) + + runner = TestRunner(session) + runner.run() + + assert len(results) == 1 + assert results[0].is_fixture_error is True + assert "LLM judge timeout" in str(results[0].error) + + def test_non_eval_test_has_no_payload(self) -> None: + collected: list[Any] = [] + + class Collector(PluginBase): + name = "collector" + + def on_test_pass(self, result: Any) -> None: + collected.append(result) + + session = ProTestSession() + session.register_plugin(Collector()) + + @session.test() + def regular_test() -> None: + assert True + + runner = TestRunner(session) + runner.run() + + assert len(collected) == 1 + assert collected[0].is_eval is False + assert collected[0].eval_payload is None + + +# --------------------------------------------------------------------------- +# History +# --------------------------------------------------------------------------- + + +class TestHistory: + """JSONL history format and querying.""" + + def _run_eval(self, tmp_path: Path) -> None: + session = EvalSession(model=ModelInfo(name="test-model"), history_dir=tmp_path) + + @session.eval(evaluators=[fake_accuracy]) + def eval_echo(case: Annotated[dict, From(basic_cases)]) -> str: + 
return echo_task(case["inputs"]) + + run_session(session) + + def test_history_file_created(self, tmp_path: Path) -> None: + self._run_eval(tmp_path) + assert (tmp_path / "history.jsonl").exists() + + def test_history_entry_format(self, tmp_path: Path) -> None: + self._run_eval(tmp_path) + lines = (tmp_path / "history.jsonl").read_text().strip().splitlines() + entry = json.loads(lines[0]) + + # Required top-level keys + assert "run_id" in entry + assert "timestamp" in entry + assert "git" in entry + assert "environment" in entry + assert "metadata" in entry + assert "evals" in entry + assert "suites" in entry + + # Evals block + assert entry["evals"] is not None + assert entry["evals"]["model"] == "test-model" + + # Suites with kind + suites = entry["suites"] + assert len(suites) == 1 + suite_name = next(iter(suites)) + suite = suites[suite_name] + assert suite["kind"] == "eval" + assert "total_cases" in suite + assert "passed" in suite + assert "cases" in suite + + def test_history_test_run_has_null_evals(self, tmp_path: Path) -> None: + session = ProTestSession(history=True, history_dir=tmp_path) + + @session.test() + def test_simple() -> None: + pass + + run_session(session) + + lines = (tmp_path / "history.jsonl").read_text().strip().splitlines() + entry = json.loads(lines[0]) + assert entry["evals"] is None + + def test_history_multiple_runs_append(self, tmp_path: Path) -> None: + self._run_eval(tmp_path) + self._run_eval(tmp_path) + lines = (tmp_path / "history.jsonl").read_text().strip().splitlines() + assert len(lines) == 2 + + def test_history_metadata_included(self, tmp_path: Path) -> None: + session = EvalSession( + history_dir=tmp_path, + metadata={"env": "test", "version": "1.0"}, + ) + + @session.eval(evaluators=[fake_accuracy]) + def eval_echo(case: Annotated[dict, From(basic_cases)]) -> str: + return echo_task(case["inputs"]) + + run_session(session) + + lines = (tmp_path / "history.jsonl").read_text().strip().splitlines() + entry = 
json.loads(lines[0]) + assert entry["metadata"]["env"] == "test" + + +# --------------------------------------------------------------------------- +# History: clean-dirty +# --------------------------------------------------------------------------- + + +class TestCleanDirty: + """protest history --clean-dirty behavior.""" + + def test_clean_dirty_removes_current_head_only(self, tmp_path: Path) -> None: + # Entry with current HEAD + dirty + try: + current_commit = subprocess.run( + ["git", "rev-parse", "HEAD"], # noqa: S607 + capture_output=True, + text=True, + timeout=5, + check=True, + ).stdout.strip() + except (FileNotFoundError, subprocess.CalledProcessError): + return # skip if not in a git repo + + path = tmp_path / "history.jsonl" + + # Dirty entry on current HEAD -> should be removed + append_entry( + path, {"git": {"commit": current_commit, "dirty": True}, "suites": {}} + ) + # Dirty entry on old commit -> should be preserved + append_entry(path, {"git": {"commit": "old123", "dirty": True}, "suites": {}}) + # Clean entry on current HEAD -> should be preserved + append_entry( + path, {"git": {"commit": current_commit, "dirty": False}, "suites": {}} + ) + + removed = clean_dirty(history_dir=tmp_path) + assert removed == 1 + + lines = path.read_text().strip().splitlines() + assert len(lines) == 2 + + +# --------------------------------------------------------------------------- +# Case hashing +# --------------------------------------------------------------------------- + + +class TestCaseHashing: + """Content hashing for eval integrity.""" + + def test_case_hash_stored_in_history(self, tmp_path: Path) -> None: + """History entries include case_hash and eval_hash per case.""" + session = EvalSession(history_dir=tmp_path) + + @session.eval(evaluators=[fake_accuracy]) + def eval_echo(case: Annotated[dict, From(basic_cases)]) -> str: + return echo_task(case["inputs"]) + + run_session(session) + + lines = (tmp_path / 
"history.jsonl").read_text().strip().splitlines() + entry = json.loads(lines[0]) + suites = entry["suites"] + suite = next(iter(suites.values())) + case = next(iter(suite["cases"].values())) + assert "case_hash" in case + assert "eval_hash" in case + assert len(case["case_hash"]) > 0 + assert len(case["eval_hash"]) > 0 + + def test_case_hash_changes_on_input_change(self) -> None: + """Different inputs -> different case_hash.""" + h1 = compute_case_hash("hello world", "expected") + h2 = compute_case_hash("hello world modified", "expected") + assert h1 != h2 + + def test_case_hash_stable_for_same_input(self) -> None: + """Same inputs -> same case_hash (deterministic).""" + h1 = compute_case_hash("hello world", "expected") + h2 = compute_case_hash("hello world", "expected") + assert h1 == h2 + + def test_eval_hash_changes_on_evaluator_change(self) -> None: + """Different evaluators -> different eval_hash.""" + e1 = contains_keywords(keywords=["hello"]) + e2 = contains_keywords(keywords=["hello", "world"]) + h1 = compute_eval_hash([e1]) + h2 = compute_eval_hash([e2]) + assert h1 != h2 + + +# --------------------------------------------------------------------------- +# Built-in evaluators +# --------------------------------------------------------------------------- + + +class TestBuiltinEvaluators: + """All built-in evaluators work correctly through protest-native API.""" + + def _make_ctx(self, output: str, expected: str | None = None) -> EvalContext: + """Minimal EvalContext for evaluator testing.""" + return EvalContext( + name="test", + inputs="", + output=output, + expected_output=expected, + metadata=None, + duration=0.0, + ) + + def test_contains_keywords(self) -> None: + e = contains_keywords(keywords=["hello", "world"]) + result = e(self._make_ctx("Hello World")) + assert result.keyword_recall == 1.0 + assert result.all_keywords_present is True + + def test_contains_expected(self) -> None: + e = contains_expected + assert e(self._make_ctx("Hello World", 
"world")) is True + assert e(self._make_ctx("Hello", "world")) is False + + def test_does_not_contain(self) -> None: + e = does_not_contain(forbidden=["cat", "dog"]) + assert e(self._make_ctx("Yorkshire")).no_forbidden_words is True + assert e(self._make_ctx("I like cats")).no_forbidden_words is False + + def test_not_empty(self) -> None: + assert not_empty(self._make_ctx("hello")) is True + assert not_empty(self._make_ctx("")) is False + assert not_empty(self._make_ctx(" ")) is False + + def test_max_length(self) -> None: + e = max_length(max_chars=5) + result = e(self._make_ctx("hi")) + assert result.within_limit is True + result = e(self._make_ctx("this is too long")) + assert result.within_limit is False + + def test_min_length(self) -> None: + assert min_length(min_chars=3)(self._make_ctx("hello")) is True + assert min_length(min_chars=10)(self._make_ctx("hi")) is False + + def test_matches_regex(self) -> None: + e = matches_regex(pattern=r"\d{3}-\d{4}") + assert e(self._make_ctx("Call 555-1234")) is True + assert e(self._make_ctx("no numbers")) is False + + def test_json_valid(self) -> None: + e = json_valid(required_keys=["name"]) + result = e(self._make_ctx('{"name": "Rex"}')) + assert result.valid_json is True + assert result.has_required_keys is True + result = e(self._make_ctx("not json")) + assert result.valid_json is False + + def test_word_overlap(self) -> None: + e = word_overlap + assert e(self._make_ctx("hello world", "hello world")).overlap == 1.0 + assert e(self._make_ctx("hello there", "hello world")).overlap == 0.5 + assert e(self._make_ctx("foo", "hello world")).overlap == 0.0 + + +# --------------------------------------------------------------------------- +# Scoring v2: bool verdict, tracking-only metrics +# --------------------------------------------------------------------------- + + +class TestScoringV2: + """Scoring v2: evaluators return bool or dataclass.""" + + def test_bool_evaluator_pass(self) -> None: + """Evaluator returning True 
-> case passes.""" + results: list[Any] = [] + + class Collector(PluginBase): + name = "collector" + + def on_test_pass(self, result: Any) -> None: + results.append(result) + + def on_test_fail(self, result: Any) -> None: + results.append(result) + + single_case = ForEach( + [ + {"inputs": "hello world", "expected": "hello", "name": "c1"}, + ], + ids=lambda c: c["name"], + ) + + session = EvalSession() + session.register_plugin(Collector()) + + @session.eval(evaluators=[not_empty]) + def eval_echo(case: Annotated[dict, From(single_case)]) -> str: + return echo_task(case["inputs"]) + + runner = TestRunner(session) + result = runner.run() + + assert result.success is True + assert len(results) == 1 + assert results[0].eval_payload.scores["not_empty"].value is True + + def test_dataclass_without_bool_is_tracking_only(self) -> None: + """Dataclass with only float fields -> tracking-only, always passes.""" + results: list[Any] = [] + + class Collector(PluginBase): + name = "collector" + + def on_test_pass(self, result: Any) -> None: + results.append(result) + + def on_test_fail(self, result: Any) -> None: + results.append(result) + + single_case = ForEach( + [ + {"inputs": "foo", "expected": "bar baz", "name": "c1"}, + ], + ids=lambda c: c["name"], + ) + + session = EvalSession() + session.register_plugin(Collector()) + + @session.eval(evaluators=[word_overlap]) + def eval_echo(case: Annotated[dict, From(single_case)]) -> str: + return echo_task(case["inputs"]) + + runner = TestRunner(session) + result = runner.run() + + # word_overlap returns only float -> tracking-only, always passes + assert result.success is True + + def test_float_return_raises_type_error(self) -> None: + """Evaluator returning naked float -> TypeError (caught as fixture error).""" + results: list[Any] = [] + + class Collector(PluginBase): + name = "collector" + + def on_test_fail(self, result: Any) -> None: + results.append(result) + + @evaluator + def bad_evaluator(ctx: EvalContext) -> float: + 
return 0.5 + + single_case = ForEach( + [{"inputs": "hello", "expected": "hello", "name": "c1"}], + ids=lambda c: c["name"], + ) + + session = EvalSession() + session.register_plugin(Collector()) + + @session.eval(evaluators=[bad_evaluator]) + def eval_echo(case: Annotated[dict, From(single_case)]) -> str: + return echo_task(case["inputs"]) + + runner = TestRunner(session) + runner.run() + + assert len(results) == 1 + assert results[0].is_fixture_error is True + + +class TestShortCircuit: + """ShortCircuit: skip expensive evaluators when cheap ones fail.""" + + def test_short_circuit_skips_on_fail(self) -> None: + call_log: list[str] = [] + + @evaluator + def cheap(ctx: EvalContext) -> bool: + call_log.append("cheap") + return "hello" in ctx.output.lower() + + @evaluator + def expensive(ctx: EvalContext) -> bool: + call_log.append("expensive") + return True + + session = EvalSession() + + @session.eval(evaluators=[ShortCircuit([cheap, expensive])]) + def eval_echo(case: Annotated[dict, From(basic_cases)]) -> str: + return echo_task(case["inputs"]) + + runner = TestRunner(session) + runner.run() + + # case_pass: cheap ✓ → expensive ✓ (both called) + # case_fail: cheap ✗ → expensive SKIPPED + assert call_log.count("cheap") == 2 + assert call_log.count("expensive") == 1 + + def test_short_circuit_all_pass(self) -> None: + call_log: list[str] = [] + + @evaluator + def check_a(ctx: EvalContext) -> bool: + call_log.append("a") + return True + + @evaluator + def check_b(ctx: EvalContext) -> bool: + call_log.append("b") + return True + + single = ForEach( + [{"inputs": "x", "expected": "x", "name": "c1"}], ids=lambda c: c["name"] + ) + session = EvalSession() + + @session.eval(evaluators=[ShortCircuit([check_a, check_b])]) + def eval_echo(case: Annotated[dict, From(single)]) -> str: + return echo_task(case["inputs"]) + + runner = TestRunner(session) + result = runner.run() + + assert result.success is True + assert call_log == ["a", "b"] + + +# 
--------------------------------------------------------------------------- +# Results files per run +# --------------------------------------------------------------------------- + + +class TestResultsFiles: + """Per-case markdown files written to .protest/results/_/.""" + + def _run_eval(self, tmp_path: Path) -> Path: + results_dir = tmp_path / "results" + session = EvalSession() + writer = EvalResultsWriter(history_dir=tmp_path) + session.register_plugin(writer) + + @session.eval(evaluators=[fake_accuracy]) + def eval_echo(case: Annotated[dict, From(basic_cases)]) -> str: + return echo_task(case["inputs"]) + + runner = TestRunner(session) + runner.run() + return results_dir + + def test_results_dir_created(self, tmp_path: Path) -> None: + results_dir = self._run_eval(tmp_path) + assert results_dir.exists() + + def test_one_file_per_case(self, tmp_path: Path) -> None: + results_dir = self._run_eval(tmp_path) + run_dirs = list(results_dir.iterdir()) + assert len(run_dirs) == 1 + case_files = list(run_dirs[0].iterdir()) + assert len(case_files) == 2 # case_pass + case_fail + + def test_case_file_contains_output(self, tmp_path: Path) -> None: + results_dir = self._run_eval(tmp_path) + run_dir = next(results_dir.iterdir()) + pass_file = next(f for f in run_dir.iterdir() if "pass" in f.name) + content = pass_file.read_text() + assert "Echo:" in content # task output + assert "PASS" in content + + def test_case_file_contains_scores(self, tmp_path: Path) -> None: + results_dir = self._run_eval(tmp_path) + run_dir = next(results_dir.iterdir()) + pass_file = next(f for f in run_dir.iterdir() if "pass" in f.name) + content = pass_file.read_text() + assert "accuracy" in content + + def test_case_file_contains_inputs(self, tmp_path: Path) -> None: + results_dir = self._run_eval(tmp_path) + run_dir = next(results_dir.iterdir()) + pass_file = next(f for f in run_dir.iterdir() if "pass" in f.name) + content = pass_file.read_text() + assert "hello world" in content # from case 
inputs + + +# --------------------------------------------------------------------------- +# Multi-dataset history (regression: all suites were merged under one name) +# --------------------------------------------------------------------------- + + +class TestMultiDatasetHistory: + """Multiple @session.eval calls produce distinct suites in history.""" + + def _run_multi(self, tmp_path: Path) -> dict[str, Any]: + pipeline_cases = ForEach( + [ + {"inputs": "hello", "expected": "hello", "name": "c1"}, + ], + ids=lambda c: c["name"], + ) + + ingest_cases = ForEach( + [ + {"inputs": "world", "expected": "world", "name": "c2"}, + ], + ids=lambda c: c["name"], + ) + + session = EvalSession(history_dir=tmp_path) + + @session.eval(evaluators=[fake_accuracy]) + def pipeline(case: Annotated[dict, From(pipeline_cases)]) -> str: + return echo_task(case["inputs"]) + + @session.eval(evaluators=[fake_accuracy]) + def ingest(case: Annotated[dict, From(ingest_cases)]) -> str: + return echo_task(case["inputs"]) + + run_session(session) + + history = (tmp_path / "history.jsonl").read_text().splitlines() + return json.loads(history[-1]) + + def test_two_datasets_produce_two_suites_in_history(self, tmp_path: Path) -> None: + entry = self._run_multi(tmp_path) + assert "pipeline" in entry["suites"] + assert "ingest" in entry["suites"] + + def test_each_suite_has_its_own_cases(self, tmp_path: Path) -> None: + entry = self._run_multi(tmp_path) + assert "c1" in entry["suites"]["pipeline"]["cases"] + assert "c2" in entry["suites"]["ingest"]["cases"] + + +# --------------------------------------------------------------------------- +# DI fixture injection dans les taches eval +# --------------------------------------------------------------------------- + + +class TestEvalTaskFixtures: + """@session.eval() peut utiliser des fixtures protest via Use().""" + + def test_task_without_fixtures_still_works(self) -> None: + # basic_cases has one match (case_pass) and one mismatch (case_fail) + # 
fake_accuracy returns matches_expected=False for case_fail -> fail + session = EvalSession() + + @session.eval(evaluators=[fake_accuracy]) + def eval_echo(case: Annotated[dict, From(basic_cases)]) -> str: + return echo_task(case["inputs"]) + + runner = TestRunner(session) + result = runner.run() + assert result.success is False # case_fail has matches_expected=False + + def test_task_with_session_fixture_is_injected(self) -> None: + """Une fixture session-scoped est injectee dans task via Use().""" + + @fixture() + def prefix_service() -> str: + return "PREFIX" + + single_case = ForEach( + [ + {"inputs": "hello", "expected": "PREFIX:hello", "name": "c1"}, + ], + ids=lambda c: c["name"], + ) + + session = EvalSession() + session.bind(prefix_service) + + @session.eval(evaluators=[fake_accuracy]) + async def eval_prefixed( + case: Annotated[dict, From(single_case)], + svc: Annotated[str, Use(prefix_service)], + ) -> str: + return f"{svc}:{case['inputs']}" + + runner = TestRunner(session) + result = runner.run() + + # fake_accuracy retourne 1.0 (output contient expected) -> passe + assert result.success is True + + def test_session_fixture_resolved_once_for_all_cases(self) -> None: + """Une session fixture ne doit etre appelee qu'une fois meme avec N cas.""" + call_count = 0 + + @fixture() + def expensive_resource() -> str: + nonlocal call_count + call_count += 1 + return "resource" + + multi_cases = ForEach( + [ + {"inputs": "a", "expected": "resource:a", "name": "c1"}, + {"inputs": "b", "expected": "resource:b", "name": "c2"}, + {"inputs": "c", "expected": "resource:c", "name": "c3"}, + ], + ids=lambda c: c["name"], + ) + + session = EvalSession() + session.bind(expensive_resource) + + @session.eval(evaluators=[fake_accuracy]) + async def eval_resource( + case: Annotated[dict, From(multi_cases)], + res: Annotated[str, Use(expensive_resource)], + ) -> str: + return f"{res}:{case['inputs']}" + + runner = TestRunner(session) + runner.run() + + assert call_count == 1 # 
fixture resolue une seule fois diff --git a/tests/evals/test_hashing.py b/tests/evals/test_hashing.py new file mode 100644 index 0000000..26e5570 --- /dev/null +++ b/tests/evals/test_hashing.py @@ -0,0 +1,289 @@ +"""Tests for protest.evals.hashing — fail-hard canonicalization.""" + +from __future__ import annotations + +import dataclasses +import functools +import threading + +import pytest + +from protest.evals.hashing import ( + CanonicalError, + _canonical, + compute_case_hash, + compute_eval_hash, +) + +# --------------------------------------------------------------------------- +# Fixtures — representative evaluator types +# --------------------------------------------------------------------------- + + +@dataclasses.dataclass +class SimpleEvaluator: + threshold: float + name: str = "simple" + + +@dataclasses.dataclass +class NestedEvaluator: + inner: SimpleEvaluator + weight: float = 1.0 + + +@dataclasses.dataclass +class LockHoldingEvaluator: + """Simulates evaluators like LLMJudge that hold non-picklable resources.""" + + name: str + _lock: threading.Lock = dataclasses.field(default_factory=threading.Lock) + + +def bare_function(ctx: object) -> bool: + return True + + +def parameterized_function(ctx: object, keywords: list[str]) -> bool: + return True + + +# --------------------------------------------------------------------------- +# _canonical — primitives & containers +# --------------------------------------------------------------------------- + + +class TestCanonicalPrimitives: + @pytest.mark.parametrize("value", [None, True, False, 42, 3.14, "hello"]) + def test_primitives_pass_through(self, value: object) -> None: + assert _canonical(value) is value + + def test_list(self) -> None: + assert _canonical([1, "a", [2]]) == [1, "a", [2]] + + def test_tuple_treated_as_list(self) -> None: + assert _canonical((1, 2)) == [1, 2] + + def test_dict_sorted_by_key(self) -> None: + assert _canonical({"b": 2, "a": 1}) == {"a": 1, "b": 2} + + +# 
--------------------------------------------------------------------------- +# _canonical — dataclass handling +# --------------------------------------------------------------------------- + + +class TestCanonicalDataclass: + def test_simple_dataclass_is_serialized(self) -> None: + ev = SimpleEvaluator(threshold=0.8) + result = _canonical(ev) + assert result == { + "__type__": "SimpleEvaluator", + "threshold": 0.8, + "name": "simple", + } + + def test_nested_dataclass_is_serialized_recursively(self) -> None: + ev = NestedEvaluator(inner=SimpleEvaluator(threshold=0.5), weight=2.0) + result = _canonical(ev) + assert result == { + "__type__": "NestedEvaluator", + "inner": { + "__type__": "SimpleEvaluator", + "threshold": 0.5, + "name": "simple", + }, + "weight": 2.0, + } + + def test_dataclass_with_lock_skips_private_fields(self) -> None: + """Regression: dataclasses.asdict() deepcopy fails on threading.Lock. + + Private fields (_prefixed) are runtime internals, not config — excluded from hash. 
+ """ + ev = LockHoldingEvaluator(name="llm_judge") + result = _canonical(ev) + assert result == {"__type__": "LockHoldingEvaluator", "name": "llm_judge"} + assert "_lock" not in result + + +# --------------------------------------------------------------------------- +# _canonical — callables (the real-world evaluator path) +# --------------------------------------------------------------------------- + + +class TestCanonicalCallable: + def test_bare_function(self) -> None: + result = _canonical(bare_function) + assert result == {"fn": "bare_function"} + + def test_partial_captures_qualname_and_kwargs(self) -> None: + bound = functools.partial(parameterized_function, keywords=["paris"]) + result = _canonical(bound) + assert result == { + "fn": "parameterized_function", + "args": [], + "kwargs": {"keywords": ["paris"]}, + } + + def test_partial_different_kwargs_different_canonical(self) -> None: + a = functools.partial(parameterized_function, keywords=["paris"]) + b = functools.partial(parameterized_function, keywords=["lyon"]) + assert _canonical(a) != _canonical(b) + + def test_partial_same_kwargs_same_canonical(self) -> None: + a = functools.partial(parameterized_function, keywords=["paris"]) + b = functools.partial(parameterized_function, keywords=["paris"]) + assert _canonical(a) == _canonical(b) + + +# --------------------------------------------------------------------------- +# _canonical — evaluator_identity (explicit, user-controlled) +# --------------------------------------------------------------------------- + + +class TestCanonicalEvaluatorIdentity: + def test_evaluator_identity_takes_precedence(self) -> None: + """evaluator_identity() is used over introspection when available.""" + + class CustomScorer: + def __init__(self, model: str, temperature: float): + self.model = model + self.temperature = temperature + self._client = object() # runtime state, not config + + def evaluator_identity(self) -> dict: + return {"model": self.model, "temperature": 
self.temperature} + + result = _canonical(CustomScorer(model="gpt-4", temperature=0.7)) + assert result == {"model": "gpt-4", "temperature": 0.7} + + def test_evaluator_identity_on_dataclass_overrides_introspection(self) -> None: + """evaluator_identity() wins even if the object is a dataclass.""" + + @dataclasses.dataclass + class VersionedEvaluator: + threshold: float + version: int = 1 + + def evaluator_identity(self) -> dict: + return {"v": self.version, "t": self.threshold} + + result = _canonical(VersionedEvaluator(threshold=0.8, version=2)) + assert result == {"v": 2, "t": 0.8} + + def test_evaluator_identity_different_config_different_hash(self) -> None: + class CustomScorer: + def __init__(self, model: str): + self.model = model + + def evaluator_identity(self) -> dict: + return {"model": self.model} + + h1 = compute_eval_hash([CustomScorer(model="gpt-4")]) + h2 = compute_eval_hash([CustomScorer(model="claude")]) + assert h1 != h2 + + def test_evaluator_identity_same_config_same_hash(self) -> None: + class CustomScorer: + def __init__(self, model: str): + self.model = model + + def evaluator_identity(self) -> dict: + return {"model": self.model} + + h1 = compute_eval_hash([CustomScorer(model="gpt-4")]) + h2 = compute_eval_hash([CustomScorer(model="gpt-4")]) + assert h1 == h2 + + +# --------------------------------------------------------------------------- +# _canonical — fail-hard on unknown types +# --------------------------------------------------------------------------- + + +class TestCanonicalFailHard: + def test_unknown_type_raises_canonical_error(self) -> None: + class Opaque: + pass + + with pytest.raises(CanonicalError, match="Opaque"): + _canonical(Opaque()) + + def test_non_callable_non_dataclass_raises(self) -> None: + with pytest.raises(CanonicalError): + _canonical(object()) + + def test_error_message_mentions_evaluator_identity(self) -> None: + class Opaque: + pass + + with pytest.raises(CanonicalError, match="evaluator_identity"): + 
_canonical(Opaque()) + + +# --------------------------------------------------------------------------- +# compute_case_hash +# --------------------------------------------------------------------------- + + +class TestComputeCaseHash: + def test_same_inputs_same_hash(self) -> None: + h1 = compute_case_hash("hello", "expected") + h2 = compute_case_hash("hello", "expected") + assert h1 == h2 + + def test_different_inputs_different_hash(self) -> None: + h1 = compute_case_hash("hello", "expected") + h2 = compute_case_hash("world", "expected") + assert h1 != h2 + + def test_none_expected_is_stable(self) -> None: + h1 = compute_case_hash("hello", None) + h2 = compute_case_hash("hello", None) + assert h1 == h2 + + def test_dict_inputs(self) -> None: + h1 = compute_case_hash({"q": "hello", "context": "world"}, "expected") + h2 = compute_case_hash({"context": "world", "q": "hello"}, "expected") + assert h1 == h2, "dict key order should not affect hash" + + +# --------------------------------------------------------------------------- +# compute_eval_hash +# --------------------------------------------------------------------------- + + +class TestComputeEvalHash: + def test_identical_evaluators_produce_same_hash(self) -> None: + ev = SimpleEvaluator(threshold=0.8) + h1 = compute_eval_hash([ev]) + h2 = compute_eval_hash([ev]) + assert h1 == h2 + + def test_different_thresholds_produce_different_hashes(self) -> None: + ev_a = SimpleEvaluator(threshold=0.8) + ev_b = SimpleEvaluator(threshold=0.9) + assert compute_eval_hash([ev_a]) != compute_eval_hash([ev_b]) + + def test_evaluator_with_lock_does_not_crash(self) -> None: + """Regression for non-picklable evaluator fields.""" + ev = LockHoldingEvaluator(name="llm_judge") + hash_val = compute_eval_hash([ev]) + assert len(hash_val) == 12 + + def test_partial_evaluators_hash_stably(self) -> None: + ev = functools.partial(parameterized_function, keywords=["paris"]) + h1 = compute_eval_hash([ev]) + h2 = compute_eval_hash([ev]) + 
assert h1 == h2 + + def test_bare_function_evaluator(self) -> None: + h1 = compute_eval_hash([bare_function]) + h2 = compute_eval_hash([bare_function]) + assert h1 == h2 + + def test_different_partial_kwargs_different_hash(self) -> None: + ev_a = functools.partial(parameterized_function, keywords=["paris"]) + ev_b = functools.partial(parameterized_function, keywords=["lyon"]) + assert compute_eval_hash([ev_a]) != compute_eval_hash([ev_b]) diff --git a/tests/evals/test_judge.py b/tests/evals/test_judge.py new file mode 100644 index 0000000..9e6fd11 --- /dev/null +++ b/tests/evals/test_judge.py @@ -0,0 +1,433 @@ +"""Tests for the Judge protocol and ctx.judge() integration.""" + +from __future__ import annotations + +from dataclasses import dataclass +from typing import Annotated, Any + +import pytest + +from protest import ForEach, From +from protest.core.runner import TestRunner +from protest.evals import ( + EvalContext, + EvalSession, + Judge, + JudgeResponse, + TaskResult, + Verdict, + evaluator, +) +from protest.plugin import PluginBase + +# --------------------------------------------------------------------------- +# Fake judge for testing +# --------------------------------------------------------------------------- + + +class FakeJudge: + """Minimal Judge implementation for tests.""" + + name: str = "fake-judge" + provider: str | None = "test" + + async def judge(self, prompt: str, output_type: type) -> JudgeResponse: + if output_type is bool: + return JudgeResponse( + output="pass" in prompt.lower(), + input_tokens=10, + output_tokens=5, + cost=0.001, + ) + if output_type is str: + return JudgeResponse(output=f"judged: {prompt[:20]}") + # For dataclass types, try to construct with defaults + return JudgeResponse(output=output_type()) + + +class BareJudge: + """Minimal Judge with required name/provider.""" + + name: str = "bare-judge" + provider: str | None = None + + async def judge(self, prompt: str, output_type: type) -> JudgeResponse: + return 
JudgeResponse(output=True) + + +# --------------------------------------------------------------------------- +# Protocol compliance +# --------------------------------------------------------------------------- + + +class TestJudgeProtocol: + def test_fake_judge_satisfies_protocol(self) -> None: + assert isinstance(FakeJudge(), Judge) + + def test_bare_judge_satisfies_protocol(self) -> None: + assert isinstance(BareJudge(), Judge) + + def test_non_judge_rejected(self) -> None: + class NotAJudge: + def evaluate(self, prompt: str) -> str: + return "nope" + + assert not isinstance(NotAJudge(), Judge) + + +# --------------------------------------------------------------------------- +# EvalContext.judge() +# --------------------------------------------------------------------------- + + +class TestEvalContextJudge: + @pytest.mark.asyncio + async def test_judge_happy_path(self) -> None: + judge = FakeJudge() + ctx = EvalContext( + name="test_case", + inputs="q", + output="a", + expected_output=None, + metadata=None, + duration=0.1, + _judge=judge, + ) + result = await ctx.judge("pass this", bool) + assert result is True + + @pytest.mark.asyncio + async def test_judge_str_output(self) -> None: + judge = FakeJudge() + ctx = EvalContext( + name="test_case", + inputs="q", + output="a", + expected_output=None, + metadata=None, + duration=0.1, + _judge=judge, + ) + result = await ctx.judge("hello world", str) + assert result == "judged: hello world" + + @pytest.mark.asyncio + async def test_judge_raises_without_judge(self) -> None: + ctx = EvalContext( + name="my_case", + inputs="q", + output="a", + expected_output=None, + metadata=None, + duration=0.1, + ) + with pytest.raises(RuntimeError, match="no judge is configured"): + await ctx.judge("test", bool) + + @pytest.mark.asyncio + async def test_judge_error_mentions_case_name(self) -> None: + ctx = EvalContext( + name="chatbot_eval", + inputs="q", + output="a", + expected_output=None, + metadata=None, + duration=0.1, + ) + 
with pytest.raises(RuntimeError, match="chatbot_eval"): + await ctx.judge("test", bool) + + @pytest.mark.asyncio + async def test_judge_call_count(self) -> None: + judge = FakeJudge() + ctx = EvalContext( + name="test_case", + inputs="q", + output="a", + expected_output=None, + metadata=None, + duration=0.1, + _judge=judge, + ) + assert ctx.judge_call_count == 0 + await ctx.judge("pass 1", bool) + assert ctx.judge_call_count == 1 + await ctx.judge("pass 2", bool) + await ctx.judge("pass 3", bool) + assert ctx.judge_call_count == 3 + + @pytest.mark.asyncio + async def test_judge_tokens_accumulated(self) -> None: + judge = FakeJudge() # returns input_tokens=10, output_tokens=5 for bool + ctx = EvalContext( + name="test_case", + inputs="q", + output="a", + expected_output=None, + metadata=None, + duration=0.1, + _judge=judge, + ) + await ctx.judge("pass 1", bool) + await ctx.judge("pass 2", bool) + assert ctx.judge_input_tokens == 20 + assert ctx.judge_output_tokens == 10 + + @pytest.mark.asyncio + async def test_judge_cost_accumulated(self) -> None: + judge = FakeJudge() # returns cost=0.001 for bool + ctx = EvalContext( + name="test_case", + inputs="q", + output="a", + expected_output=None, + metadata=None, + duration=0.1, + _judge=judge, + ) + await ctx.judge("pass 1", bool) + await ctx.judge("pass 2", bool) + assert ctx.judge_cost == pytest.approx(0.002) + + @pytest.mark.asyncio + async def test_judge_none_tokens_not_accumulated(self) -> None: + """JudgeResponse with tokens=None doesn't affect accumulation.""" + judge = FakeJudge() + ctx = EvalContext( + name="test_case", + inputs="q", + output="a", + expected_output=None, + metadata=None, + duration=0.1, + _judge=judge, + ) + await ctx.judge("hello", str) # FakeJudge returns no tokens for str + assert ctx.judge_input_tokens == 0 + assert ctx.judge_output_tokens == 0 + assert ctx.judge_cost == 0.0 + + +# --------------------------------------------------------------------------- +# E2E: EvalSession with judge +# 
--------------------------------------------------------------------------- + +single_case = ForEach( + [{"inputs": "hello", "expected": "hello", "name": "case_1"}], + ids=lambda c: c["name"], +) + + +class TestJudgeE2E: + def test_judge_available_in_evaluator(self) -> None: + """Full run: evaluator calls ctx.judge(), result is pass.""" + + @evaluator + async def judge_evaluator(ctx: EvalContext) -> bool: + return await ctx.judge("pass this", bool) + + session = EvalSession(judge=FakeJudge()) + + @session.eval(evaluators=[judge_evaluator]) + def eval_echo(case: Annotated[dict, From(single_case)]) -> str: + return case["inputs"] + + runner = TestRunner(session) + result = runner.run() + assert result.success is True + + def test_no_judge_is_fixture_error(self) -> None: + """Evaluator calls ctx.judge() without judge configured → infra error.""" + + @evaluator + async def needs_judge(ctx: EvalContext) -> bool: + return await ctx.judge("test", bool) + + session = EvalSession() # no judge + + @session.eval(evaluators=[needs_judge]) + def eval_echo(case: Annotated[dict, From(single_case)]) -> str: + return case["inputs"] + + results: list[Any] = [] + + class Collector(PluginBase): + name = "collector" + + def on_test_fail(self, result: Any) -> None: + results.append(result) + + session.register_plugin(Collector()) + runner = TestRunner(session) + result = runner.run() + assert result.success is False + assert len(results) == 1 + assert results[0].is_fixture_error is True + + def test_judge_call_count_in_payload(self) -> None: + """judge_call_count flows through to EvalPayload.""" + + @evaluator + async def double_judge(ctx: EvalContext) -> bool: + r1 = await ctx.judge("pass first", bool) + r2 = await ctx.judge("pass second", bool) + return r1 and r2 + + session = EvalSession(judge=FakeJudge()) + + @session.eval(evaluators=[double_judge]) + def eval_echo(case: Annotated[dict, From(single_case)]) -> str: + return case["inputs"] + + results: list[Any] = [] + + class 
Collector(PluginBase): + name = "collector" + + def on_test_pass(self, result: Any) -> None: + results.append(result) + + session.register_plugin(Collector()) + runner = TestRunner(session) + runner.run() + assert len(results) == 1 + payload = results[0].eval_payload + assert payload is not None + assert payload.judge_call_count == 2 + assert payload.judge_input_tokens == 20 # 10 per call x 2 + assert payload.judge_output_tokens == 10 # 5 per call x 2 + assert payload.judge_cost == pytest.approx(0.002) # 0.001 per call x 2 + + def test_judge_info_derived_from_instance(self) -> None: + """EvalSession derives JudgeInfo from Judge instance.""" + session = EvalSession(judge=FakeJudge()) + assert session._eval_judge is not None + assert session._eval_judge.name == "fake-judge" + assert session._eval_judge.provider == "test" + + def test_no_judge_no_judge_info(self) -> None: + """EvalSession without judge has no JudgeInfo.""" + session = EvalSession() + assert session._eval_judge is None + + def test_judge_with_structured_output(self) -> None: + """Judge returns structured dataclass via output_type.""" + + @dataclass + class JudgeVerdict: + ok: Annotated[bool, Verdict] + + class StructuredJudge: + name: str = "structured" + provider: str | None = None + + async def judge(self, prompt: str, output_type: type) -> JudgeResponse: + return JudgeResponse(output=output_type(ok=True)) + + @evaluator + async def struct_evaluator(ctx: EvalContext) -> JudgeVerdict: + return await ctx.judge("evaluate this", JudgeVerdict) + + session = EvalSession(judge=StructuredJudge()) + + @session.eval(evaluators=[struct_evaluator]) + def eval_echo(case: Annotated[dict, From(single_case)]) -> str: + return case["inputs"] + + runner = TestRunner(session) + result = runner.run() + assert result.success is True + + +# --------------------------------------------------------------------------- +# TaskResult: SUT usage tracking +# 
--------------------------------------------------------------------------- + + +class TestTaskResult: + def test_task_result_unwrapped_for_evaluators(self) -> None: + """TaskResult is unwrapped — evaluators see the plain output.""" + + @evaluator + def check_output(ctx: EvalContext) -> bool: + return ctx.output == "hello" # sees str, not TaskResult + + session = EvalSession() + + @session.eval(evaluators=[check_output]) + def eval_echo(case: Annotated[dict, From(single_case)]) -> TaskResult[str]: + return TaskResult( + output=case["inputs"], + input_tokens=100, + output_tokens=50, + cost=0.01, + ) + + runner = TestRunner(session) + result = runner.run() + assert result.success is True + + def test_task_usage_in_payload(self) -> None: + """TaskResult tokens/cost flow through to EvalPayload.""" + + @evaluator + def always_pass(ctx: EvalContext) -> bool: + return True + + session = EvalSession() + + @session.eval(evaluators=[always_pass]) + def eval_echo(case: Annotated[dict, From(single_case)]) -> TaskResult[str]: + return TaskResult( + output=case["inputs"], + input_tokens=200, + output_tokens=80, + cost=0.005, + ) + + results: list[Any] = [] + + class Collector(PluginBase): + name = "collector" + + def on_test_pass(self, result: Any) -> None: + results.append(result) + + session.register_plugin(Collector()) + runner = TestRunner(session) + runner.run() + assert len(results) == 1 + payload = results[0].eval_payload + assert payload is not None + assert payload.task_input_tokens == 200 + assert payload.task_output_tokens == 80 + assert payload.task_cost == pytest.approx(0.005) + + def test_plain_return_has_zero_task_usage(self) -> None: + """Plain return (no TaskResult) has zero task usage.""" + + @evaluator + def always_pass(ctx: EvalContext) -> bool: + return True + + session = EvalSession() + + @session.eval(evaluators=[always_pass]) + def eval_echo(case: Annotated[dict, From(single_case)]) -> str: + return case["inputs"] + + results: list[Any] = [] + + class 
Collector(PluginBase):
            name = "collector"

            def on_test_pass(self, result: Any) -> None:
                results.append(result)

        session.register_plugin(Collector())
        runner = TestRunner(session)
        runner.run()
        payload = results[0].eval_payload
        # No TaskResult wrapper -> usage fields default to zero.
        assert payload.task_input_tokens == 0
        assert payload.task_output_tokens == 0
        assert payload.task_cost == 0.0
diff --git a/tests/test_history_stats.py b/tests/test_history_stats.py
new file mode 100644
index 0000000..cc99c17
--- /dev/null
+++ b/tests/test_history_stats.py
@@ -0,0 +1,164 @@
"""Tests for history stats — error-only runs must be excluded from stats."""

from __future__ import annotations

from protest.cli.history import _aggregate_suites, _rich_score_arrows


def _make_entry(
    suite_name: str = "pipeline",
    passed: int = 0,
    total: int = 0,
    errored: int = 0,
    cases: dict | None = None,
) -> dict:
    """Build a minimal history entry with one suite."""
    return {
        "suites": {
            suite_name: {
                "kind": "eval",
                "passed": passed,
                "total_cases": total,
                "errored": errored,
                "cases": cases or {},
            }
        }
    }


def _case(passed: bool, score: float) -> dict:
    """Build a minimal real (non-error) case record with one accuracy score."""
    return {"passed": passed, "scores": {"accuracy": score}}


def _error_case() -> dict:
    """Build a case record representing an errored run (no scores)."""
    return {"passed": False, "is_error": True, "scores": {}}


class TestErrorRunsExcludedFromStats:
    """Error-only runs (fixture crashes) are excluded from stats."""

    def test_error_runs_not_counted(self) -> None:
        """Runs where errored >= total should not count in n_runs or pass_rates."""
        entries = [
            _make_entry(passed=29, total=39, cases={"a": _case(True, 0.8)}),
            _make_entry(passed=0, total=1, errored=1, cases={"x": _error_case()}),
            _make_entry(passed=0, total=1, errored=1, cases={"x": _error_case()}),
            _make_entry(passed=28, total=39, cases={"a": _case(True, 0.7)}),
            _make_entry(passed=0, total=1, errored=1, cases={"x": _error_case()}),
        ]

        suites = _aggregate_suites(entries)
        s = suites["pipeline"]

        # Only 2 real runs
        # counted
        assert s["n_runs"] == 2
        assert len(s["pass_rates"]) == 2
        # pass_rates reflect only real runs
        assert s["pass_rates"][0] == 29 / 39
        assert s["pass_rates"][1] == 28 / 39

    def test_error_cases_not_tracked(self) -> None:
        """Cases with is_error=True should not appear in cases_seen or score_values."""
        entries = [
            _make_entry(
                passed=1,
                total=2,
                errored=0,
                cases={
                    "real_case": _case(True, 0.9),
                    "errored_case": _error_case(),
                },
            ),
        ]

        suites = _aggregate_suites(entries)
        s = suites["pipeline"]
        assert "real_case" in s["cases_seen"]
        assert "errored_case" not in s["cases_seen"]
        # Only the real case contributes an accuracy sample.
        assert len(s["score_values"]["accuracy"]) == 1

    def test_error_cases_not_in_flaky(self) -> None:
        """Error cases should never appear as flaky."""
        entries = [
            _make_entry(passed=1, total=1, cases={"a": _case(True, 0.9)}),
            _make_entry(
                passed=0,
                total=1,
                errored=1,
                cases={"a": _error_case()},
            ),
        ]

        suites = _aggregate_suites(entries)
        s = suites["pipeline"]
        # Only the real run is counted
        assert s["n_runs"] == 1
        assert len(s["flaky"]) == 0

    def test_all_error_runs_produce_empty_suite(self) -> None:
        """If ALL runs are errors, suite has 0 runs and empty stats."""
        entries = [
            _make_entry(passed=0, total=1, errored=1, cases={"x": _error_case()}),
            _make_entry(passed=0, total=1, errored=1, cases={"x": _error_case()}),
        ]

        suites = _aggregate_suites(entries)
        # Suite exists but has 0 real runs
        assert suites["pipeline"]["n_runs"] == 0
        assert suites["pipeline"]["pass_rates"] == []

    def test_mixed_real_and_error_runs(self) -> None:
        """Real data pattern: mostly errors with a few real runs."""
        entries = [
            _make_entry(passed=0, total=1, errored=1),  # error
            _make_entry(passed=0, total=1, errored=1),  # error
            _make_entry(passed=29, total=39, cases={"a": _case(True, 0.7)}),  # real
            _make_entry(passed=0, total=1, errored=1),  # error
            _make_entry(passed=0, total=1, errored=1),  # error
_make_entry(passed=28, total=39, cases={"a": _case(True, 0.8)}), # real + _make_entry(passed=0, total=1, errored=1), # error + _make_entry(passed=0, total=1, errored=1), # error + _make_entry(passed=0, total=1, errored=1), # error + _make_entry(passed=0, total=1, errored=1), # error + ] + + suites = _aggregate_suites(entries) + s = suites["pipeline"] + + assert s["n_runs"] == 2 # not 10 + assert len(s["pass_rates"]) == 2 + # Arrows reflect only the 2 real runs, not the 8 errors + arrows = _rich_score_arrows(s["score_values"]) + # accuracy went 0.7 → 0.8 → should show ↗ + assert "↗" in arrows + + +class TestScoreArrowsWithCleanData: + """Score arrows with only real runs (no errors to filter).""" + + def test_stable_scores_show_no_trend(self) -> None: + entries = [ + _make_entry(passed=2, total=2, cases={"a": _case(True, 0.8)}), + _make_entry(passed=2, total=2, cases={"a": _case(True, 0.8)}), + ] + suites = _aggregate_suites(entries) + arrows = _rich_score_arrows(suites["pipeline"]["score_values"]) + assert "→" in arrows + + def test_improving_scores_show_up(self) -> None: + entries = [ + _make_entry(passed=1, total=1, cases={"a": _case(True, 0.3)}), + _make_entry(passed=1, total=1, cases={"a": _case(True, 0.9)}), + ] + suites = _aggregate_suites(entries) + arrows = _rich_score_arrows(suites["pipeline"]["score_values"]) + assert "↗" in arrows + + def test_declining_scores_show_down(self) -> None: + entries = [ + _make_entry(passed=1, total=1, cases={"a": _case(True, 0.9)}), + _make_entry(passed=1, total=1, cases={"a": _case(True, 0.3)}), + ] + suites = _aggregate_suites(entries) + arrows = _rich_score_arrows(suites["pipeline"]["score_values"]) + assert "↘" in arrows diff --git a/uv.lock b/uv.lock index d7c8a6d..e4d7032 100644 --- a/uv.lock +++ b/uv.lock @@ -2,6 +2,29 @@ version = 1 revision = 3 requires-python = ">=3.10" +[[package]] +name = "annotated-types" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = "2024-05-20T21:33:25.928Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" }, +] + +[[package]] +name = "anyio" +version = "4.13.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "idna" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/19/14/2c5dd9f512b66549ae92767a9c7b330ae88e1932ca57876909410251fe13/anyio-4.13.0.tar.gz", hash = "sha256:334b70e641fd2221c1505b3890c69882fe4a2df910cba14d97019b90b24439dc", size = 231622, upload-time = "2026-03-24T12:59:09.671Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/da/42/e921fccf5015463e32a3cf6ee7f980a6ed0f395ceeaa45060b61d86486c2/anyio-4.13.0-py3-none-any.whl", hash = "sha256:08b310f9e24a9594186fd75b4f73f4a4152069e3853f1ed8bfbf58369f4ad708", size = 114353, upload-time = "2026-03-24T12:59:08.246Z" }, +] + [[package]] name = "attrs" version = "25.4.0" @@ -305,6 +328,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/76/91/7216b27286936c16f5b4d0c530087e4a54eead683e6b0b73dd0c64844af6/filelock-3.20.0-py3-none-any.whl", hash = "sha256:339b4732ffda5cd79b13f4e2711a31b0365ce445d95d243bb996273d072546a2", size = 16054, upload-time = "2025-10-08T18:03:48.35Z" }, ] +[[package]] +name = "genai-prices" +version = "0.0.56" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "httpx" }, + { 
name = "pydantic" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/44/6b/94b3018a672c7775edfb485f0fed8f6068fba75e49b067e8a1ac5eb96764/genai_prices-0.0.56.tar.gz", hash = "sha256:ac24b16a84d0ab97539bfa48dfa4649689de8e3ce71c12ebacef29efb1998045", size = 65872, upload-time = "2026-03-20T20:33:00.732Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a3/f6/8ef7e4c286deb2709d11ca96a5237caae3ef4876ab3c48095856cfd2df30/genai_prices-0.0.56-py3-none-any.whl", hash = "sha256:dbe86be8f3f556bed1b72209ed36851fec8b01793b3b220f42921a4e7da945f6", size = 68966, upload-time = "2026-03-20T20:33:02.555Z" }, +] + [[package]] name = "ghp-import" version = "2.1.0" @@ -317,6 +353,52 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/f7/ec/67fbef5d497f86283db54c22eec6f6140243aae73265799baaaa19cd17fb/ghp_import-2.1.0-py3-none-any.whl", hash = "sha256:8337dd7b50877f163d4c0289bc1f1c7f127550241988d568c1db512c4324a619", size = 11034, upload-time = "2022-05-02T15:47:14.552Z" }, ] +[[package]] +name = "griffelib" +version = "2.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/9d/82/74f4a3310cdabfbb10da554c3a672847f1ed33c6f61dd472681ce7f1fe67/griffelib-2.0.2.tar.gz", hash = "sha256:3cf20b3bc470e83763ffbf236e0076b1211bac1bc67de13daf494640f2de707e", size = 166461, upload-time = "2026-03-27T11:34:51.091Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/8c/c9138d881c79aa0ea9ed83cbd58d5ca75624378b38cee225dcf5c42cc91f/griffelib-2.0.2-py3-none-any.whl", hash = "sha256:925c857658fb1ba40c0772c37acbc2ab650bd794d9c1b9726922e36ea4117ea1", size = 142357, upload-time = "2026-03-27T11:34:46.275Z" }, +] + +[[package]] +name = "h11" +version = "0.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = 
"sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, +] + +[[package]] +name = "httpcore" +version = "1.0.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" }, +] + +[[package]] +name = "httpx" +version = "0.28.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "certifi" }, + { name = "httpcore" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = 
"2024-12-06T15:37:21.509Z" }, +] + [[package]] name = "identify" version = "2.6.15" @@ -335,6 +417,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size = 71008, upload-time = "2025-10-12T14:55:18.883Z" }, ] +[[package]] +name = "importlib-metadata" +version = "8.7.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "zipp" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f3/49/3b30cad09e7771a4982d9975a8cbf64f00d4a1ececb53297f1d9a7be1b10/importlib_metadata-8.7.1.tar.gz", hash = "sha256:49fef1ae6440c182052f407c8d34a68f72efc36db9ca90dc0113398f2fdde8bb", size = 57107, upload-time = "2025-12-21T10:00:19.278Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fa/5e/f8e9a1d23b9c20a551a8a02ea3637b4642e22c2626e3a13a9a29cdea99eb/importlib_metadata-8.7.1-py3-none-any.whl", hash = "sha256:5a1f80bf1daa489495071efbb095d75a634cf28a8bc299581244063b53176151", size = 27865, upload-time = "2025-12-21T10:00:18.329Z" }, +] + [[package]] name = "iniconfig" version = "2.1.0" @@ -383,6 +477,100 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/41/45/1a4ed80516f02155c51f51e8cedb3c1902296743db0bbc66608a0db2814f/jsonschema_specifications-2025.9.1-py3-none-any.whl", hash = "sha256:98802fee3a11ee76ecaca44429fda8a41bff98b00a0f2838151b113f210cc6fe", size = 18437, upload-time = "2025-09-08T01:34:57.871Z" }, ] +[[package]] +name = "librt" +version = "0.8.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/56/9c/b4b0c54d84da4a94b37bd44151e46d5e583c9534c7e02250b961b1b6d8a8/librt-0.8.1.tar.gz", hash = "sha256:be46a14693955b3bd96014ccbdb8339ee8c9346fbe11c1b78901b55125f14c73", size = 177471, upload-time = "2026-02-17T16:13:06.101Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/7c/5f/63f5fa395c7a8a93558c0904ba8f1c8d1b997ca6a3de61bc7659970d66bf/librt-0.8.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:81fd938344fecb9373ba1b155968c8a329491d2ce38e7ddb76f30ffb938f12dc", size = 65697, upload-time = "2026-02-17T16:11:06.903Z" }, + { url = "https://files.pythonhosted.org/packages/ff/e0/0472cf37267b5920eff2f292ccfaede1886288ce35b7f3203d8de00abfe6/librt-0.8.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5db05697c82b3a2ec53f6e72b2ed373132b0c2e05135f0696784e97d7f5d48e7", size = 68376, upload-time = "2026-02-17T16:11:08.395Z" }, + { url = "https://files.pythonhosted.org/packages/c8/be/8bd1359fdcd27ab897cd5963294fa4a7c83b20a8564678e4fd12157e56a5/librt-0.8.1-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d56bc4011975f7460bea7b33e1ff425d2f1adf419935ff6707273c77f8a4ada6", size = 197084, upload-time = "2026-02-17T16:11:09.774Z" }, + { url = "https://files.pythonhosted.org/packages/e2/fe/163e33fdd091d0c2b102f8a60cc0a61fd730ad44e32617cd161e7cd67a01/librt-0.8.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5cdc0f588ff4b663ea96c26d2a230c525c6fc62b28314edaaaca8ed5af931ad0", size = 207337, upload-time = "2026-02-17T16:11:11.311Z" }, + { url = "https://files.pythonhosted.org/packages/01/99/f85130582f05dcf0c8902f3d629270231d2f4afdfc567f8305a952ac7f14/librt-0.8.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:97c2b54ff6717a7a563b72627990bec60d8029df17df423f0ed37d56a17a176b", size = 219980, upload-time = "2026-02-17T16:11:12.499Z" }, + { url = "https://files.pythonhosted.org/packages/6f/54/cb5e4d03659e043a26c74e08206412ac9a3742f0477d96f9761a55313b5f/librt-0.8.1-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:8f1125e6bbf2f1657d9a2f3ccc4a2c9b0c8b176965bb565dd4d86be67eddb4b6", size = 212921, upload-time = "2026-02-17T16:11:14.484Z" }, + { 
url = "https://files.pythonhosted.org/packages/b1/81/a3a01e4240579c30f3487f6fed01eb4bc8ef0616da5b4ebac27ca19775f3/librt-0.8.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:8f4bb453f408137d7581be309b2fbc6868a80e7ef60c88e689078ee3a296ae71", size = 221381, upload-time = "2026-02-17T16:11:17.459Z" }, + { url = "https://files.pythonhosted.org/packages/08/b0/fc2d54b4b1c6fb81e77288ff31ff25a2c1e62eaef4424a984f228839717b/librt-0.8.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:c336d61d2fe74a3195edc1646d53ff1cddd3a9600b09fa6ab75e5514ba4862a7", size = 216714, upload-time = "2026-02-17T16:11:19.197Z" }, + { url = "https://files.pythonhosted.org/packages/96/96/85daa73ffbd87e1fb287d7af6553ada66bf25a2a6b0de4764344a05469f6/librt-0.8.1-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:eb5656019db7c4deacf0c1a55a898c5bb8f989be904597fcb5232a2f4828fa05", size = 214777, upload-time = "2026-02-17T16:11:20.443Z" }, + { url = "https://files.pythonhosted.org/packages/12/9c/c3aa7a2360383f4bf4f04d98195f2739a579128720c603f4807f006a4225/librt-0.8.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c25d9e338d5bed46c1632f851babf3d13c78f49a225462017cf5e11e845c5891", size = 237398, upload-time = "2026-02-17T16:11:22.083Z" }, + { url = "https://files.pythonhosted.org/packages/61/19/d350ea89e5274665185dabc4bbb9c3536c3411f862881d316c8b8e00eb66/librt-0.8.1-cp310-cp310-win32.whl", hash = "sha256:aaab0e307e344cb28d800957ef3ec16605146ef0e59e059a60a176d19543d1b7", size = 54285, upload-time = "2026-02-17T16:11:23.27Z" }, + { url = "https://files.pythonhosted.org/packages/4f/d6/45d587d3d41c112e9543a0093d883eb57a24a03e41561c127818aa2a6bcc/librt-0.8.1-cp310-cp310-win_amd64.whl", hash = "sha256:56e04c14b696300d47b3bc5f1d10a00e86ae978886d0cee14e5714fafb5df5d2", size = 61352, upload-time = "2026-02-17T16:11:24.207Z" }, + { url = "https://files.pythonhosted.org/packages/1d/01/0e748af5e4fee180cf7cd12bd12b0513ad23b045dccb2a83191bde82d168/librt-0.8.1-cp311-cp311-macosx_10_9_x86_64.whl", hash 
= "sha256:681dc2451d6d846794a828c16c22dc452d924e9f700a485b7ecb887a30aad1fd", size = 65315, upload-time = "2026-02-17T16:11:25.152Z" }, + { url = "https://files.pythonhosted.org/packages/9d/4d/7184806efda571887c798d573ca4134c80ac8642dcdd32f12c31b939c595/librt-0.8.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a3b4350b13cc0e6f5bec8fa7caf29a8fb8cdc051a3bae45cfbfd7ce64f009965", size = 68021, upload-time = "2026-02-17T16:11:26.129Z" }, + { url = "https://files.pythonhosted.org/packages/ae/88/c3c52d2a5d5101f28d3dc89298444626e7874aa904eed498464c2af17627/librt-0.8.1-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:ac1e7817fd0ed3d14fd7c5df91daed84c48e4c2a11ee99c0547f9f62fdae13da", size = 194500, upload-time = "2026-02-17T16:11:27.177Z" }, + { url = "https://files.pythonhosted.org/packages/d6/5d/6fb0a25b6a8906e85b2c3b87bee1d6ed31510be7605b06772f9374ca5cb3/librt-0.8.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:747328be0c5b7075cde86a0e09d7a9196029800ba75a1689332348e998fb85c0", size = 205622, upload-time = "2026-02-17T16:11:28.242Z" }, + { url = "https://files.pythonhosted.org/packages/b2/a6/8006ae81227105476a45691f5831499e4d936b1c049b0c1feb17c11b02d1/librt-0.8.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f0af2bd2bc204fa27f3d6711d0f360e6b8c684a035206257a81673ab924aa11e", size = 218304, upload-time = "2026-02-17T16:11:29.344Z" }, + { url = "https://files.pythonhosted.org/packages/ee/19/60e07886ad16670aae57ef44dada41912c90906a6fe9f2b9abac21374748/librt-0.8.1-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:d480de377f5b687b6b1bc0c0407426da556e2a757633cc7e4d2e1a057aa688f3", size = 211493, upload-time = "2026-02-17T16:11:30.445Z" }, + { url = 
"https://files.pythonhosted.org/packages/9c/cf/f666c89d0e861d05600438213feeb818c7514d3315bae3648b1fc145d2b6/librt-0.8.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d0ee06b5b5291f609ddb37b9750985b27bc567791bc87c76a569b3feed8481ac", size = 219129, upload-time = "2026-02-17T16:11:32.021Z" }, + { url = "https://files.pythonhosted.org/packages/8f/ef/f1bea01e40b4a879364c031476c82a0dc69ce068daad67ab96302fed2d45/librt-0.8.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:9e2c6f77b9ad48ce5603b83b7da9ee3e36b3ab425353f695cba13200c5d96596", size = 213113, upload-time = "2026-02-17T16:11:33.192Z" }, + { url = "https://files.pythonhosted.org/packages/9b/80/cdab544370cc6bc1b72ea369525f547a59e6938ef6863a11ab3cd24759af/librt-0.8.1-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:439352ba9373f11cb8e1933da194dcc6206daf779ff8df0ed69c5e39113e6a99", size = 212269, upload-time = "2026-02-17T16:11:34.373Z" }, + { url = "https://files.pythonhosted.org/packages/9d/9c/48d6ed8dac595654f15eceab2035131c136d1ae9a1e3548e777bb6dbb95d/librt-0.8.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:82210adabbc331dbb65d7868b105185464ef13f56f7f76688565ad79f648b0fe", size = 234673, upload-time = "2026-02-17T16:11:36.063Z" }, + { url = "https://files.pythonhosted.org/packages/16/01/35b68b1db517f27a01be4467593292eb5315def8900afad29fabf56304ba/librt-0.8.1-cp311-cp311-win32.whl", hash = "sha256:52c224e14614b750c0a6d97368e16804a98c684657c7518752c356834fff83bb", size = 54597, upload-time = "2026-02-17T16:11:37.544Z" }, + { url = "https://files.pythonhosted.org/packages/71/02/796fe8f02822235966693f257bf2c79f40e11337337a657a8cfebba5febc/librt-0.8.1-cp311-cp311-win_amd64.whl", hash = "sha256:c00e5c884f528c9932d278d5c9cbbea38a6b81eb62c02e06ae53751a83a4d52b", size = 61733, upload-time = "2026-02-17T16:11:38.691Z" }, + { url = "https://files.pythonhosted.org/packages/28/ad/232e13d61f879a42a4e7117d65e4984bb28371a34bb6fb9ca54ec2c8f54e/librt-0.8.1-cp311-cp311-win_arm64.whl", hash = 
"sha256:f7cdf7f26c2286ffb02e46d7bac56c94655540b26347673bea15fa52a6af17e9", size = 52273, upload-time = "2026-02-17T16:11:40.308Z" }, + { url = "https://files.pythonhosted.org/packages/95/21/d39b0a87ac52fc98f621fb6f8060efb017a767ebbbac2f99fbcbc9ddc0d7/librt-0.8.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a28f2612ab566b17f3698b0da021ff9960610301607c9a5e8eaca62f5e1c350a", size = 66516, upload-time = "2026-02-17T16:11:41.604Z" }, + { url = "https://files.pythonhosted.org/packages/69/f1/46375e71441c43e8ae335905e069f1c54febee63a146278bcee8782c84fd/librt-0.8.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:60a78b694c9aee2a0f1aaeaa7d101cf713e92e8423a941d2897f4fa37908dab9", size = 68634, upload-time = "2026-02-17T16:11:43.268Z" }, + { url = "https://files.pythonhosted.org/packages/0a/33/c510de7f93bf1fa19e13423a606d8189a02624a800710f6e6a0a0f0784b3/librt-0.8.1-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:758509ea3f1eba2a57558e7e98f4659d0ea7670bff49673b0dde18a3c7e6c0eb", size = 198941, upload-time = "2026-02-17T16:11:44.28Z" }, + { url = "https://files.pythonhosted.org/packages/dd/36/e725903416409a533d92398e88ce665476f275081d0d7d42f9c4951999e5/librt-0.8.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:039b9f2c506bd0ab0f8725aa5ba339c6f0cd19d3b514b50d134789809c24285d", size = 209991, upload-time = "2026-02-17T16:11:45.462Z" }, + { url = "https://files.pythonhosted.org/packages/30/7a/8d908a152e1875c9f8eac96c97a480df425e657cdb47854b9efaa4998889/librt-0.8.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5bb54f1205a3a6ab41a6fd71dfcdcbd278670d3a90ca502a30d9da583105b6f7", size = 224476, upload-time = "2026-02-17T16:11:46.542Z" }, + { url = "https://files.pythonhosted.org/packages/a8/b8/a22c34f2c485b8903a06f3fe3315341fe6876ef3599792344669db98fcff/librt-0.8.1-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash 
= "sha256:05bd41cdee35b0c59c259f870f6da532a2c5ca57db95b5f23689fcb5c9e42440", size = 217518, upload-time = "2026-02-17T16:11:47.746Z" }, + { url = "https://files.pythonhosted.org/packages/79/6f/5c6fea00357e4f82ba44f81dbfb027921f1ab10e320d4a64e1c408d035d9/librt-0.8.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:adfab487facf03f0d0857b8710cf82d0704a309d8ffc33b03d9302b4c64e91a9", size = 225116, upload-time = "2026-02-17T16:11:49.298Z" }, + { url = "https://files.pythonhosted.org/packages/f2/a0/95ced4e7b1267fe1e2720a111685bcddf0e781f7e9e0ce59d751c44dcfe5/librt-0.8.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:153188fe98a72f206042be10a2c6026139852805215ed9539186312d50a8e972", size = 217751, upload-time = "2026-02-17T16:11:50.49Z" }, + { url = "https://files.pythonhosted.org/packages/93/c2/0517281cb4d4101c27ab59472924e67f55e375bc46bedae94ac6dc6e1902/librt-0.8.1-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:dd3c41254ee98604b08bd5b3af5bf0a89740d4ee0711de95b65166bf44091921", size = 218378, upload-time = "2026-02-17T16:11:51.783Z" }, + { url = "https://files.pythonhosted.org/packages/43/e8/37b3ac108e8976888e559a7b227d0ceac03c384cfd3e7a1c2ee248dbae79/librt-0.8.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e0d138c7ae532908cbb342162b2611dbd4d90c941cd25ab82084aaf71d2c0bd0", size = 241199, upload-time = "2026-02-17T16:11:53.561Z" }, + { url = "https://files.pythonhosted.org/packages/4b/5b/35812d041c53967fedf551a39399271bbe4257e681236a2cf1a69c8e7fa1/librt-0.8.1-cp312-cp312-win32.whl", hash = "sha256:43353b943613c5d9c49a25aaffdba46f888ec354e71e3529a00cca3f04d66a7a", size = 54917, upload-time = "2026-02-17T16:11:54.758Z" }, + { url = "https://files.pythonhosted.org/packages/de/d1/fa5d5331b862b9775aaf2a100f5ef86854e5d4407f71bddf102f4421e034/librt-0.8.1-cp312-cp312-win_amd64.whl", hash = "sha256:ff8baf1f8d3f4b6b7257fcb75a501f2a5499d0dda57645baa09d4d0d34b19444", size = 62017, upload-time = "2026-02-17T16:11:55.748Z" }, + { url = 
"https://files.pythonhosted.org/packages/c7/7c/c614252f9acda59b01a66e2ddfd243ed1c7e1deab0293332dfbccf862808/librt-0.8.1-cp312-cp312-win_arm64.whl", hash = "sha256:0f2ae3725904f7377e11cc37722d5d401e8b3d5851fb9273d7f4fe04f6b3d37d", size = 52441, upload-time = "2026-02-17T16:11:56.801Z" }, + { url = "https://files.pythonhosted.org/packages/c5/3c/f614c8e4eaac7cbf2bbdf9528790b21d89e277ee20d57dc6e559c626105f/librt-0.8.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7e6bad1cd94f6764e1e21950542f818a09316645337fd5ab9a7acc45d99a8f35", size = 66529, upload-time = "2026-02-17T16:11:57.809Z" }, + { url = "https://files.pythonhosted.org/packages/ab/96/5836544a45100ae411eda07d29e3d99448e5258b6e9c8059deb92945f5c2/librt-0.8.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cf450f498c30af55551ba4f66b9123b7185362ec8b625a773b3d39aa1a717583", size = 68669, upload-time = "2026-02-17T16:11:58.843Z" }, + { url = "https://files.pythonhosted.org/packages/06/53/f0b992b57af6d5531bf4677d75c44f095f2366a1741fb695ee462ae04b05/librt-0.8.1-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:eca45e982fa074090057132e30585a7e8674e9e885d402eae85633e9f449ce6c", size = 199279, upload-time = "2026-02-17T16:11:59.862Z" }, + { url = "https://files.pythonhosted.org/packages/f3/ad/4848cc16e268d14280d8168aee4f31cea92bbd2b79ce33d3e166f2b4e4fc/librt-0.8.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0c3811485fccfda840861905b8c70bba5ec094e02825598bb9d4ca3936857a04", size = 210288, upload-time = "2026-02-17T16:12:00.954Z" }, + { url = "https://files.pythonhosted.org/packages/52/05/27fdc2e95de26273d83b96742d8d3b7345f2ea2bdbd2405cc504644f2096/librt-0.8.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5e4af413908f77294605e28cfd98063f54b2c790561383971d2f52d113d9c363", size = 224809, upload-time = "2026-02-17T16:12:02.108Z" }, + { url = 
"https://files.pythonhosted.org/packages/7a/d0/78200a45ba3240cb042bc597d6f2accba9193a2c57d0356268cbbe2d0925/librt-0.8.1-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:5212a5bd7fae98dae95710032902edcd2ec4dc994e883294f75c857b83f9aba0", size = 218075, upload-time = "2026-02-17T16:12:03.631Z" }, + { url = "https://files.pythonhosted.org/packages/af/72/a210839fa74c90474897124c064ffca07f8d4b347b6574d309686aae7ca6/librt-0.8.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e692aa2d1d604e6ca12d35e51fdc36f4cda6345e28e36374579f7ef3611b3012", size = 225486, upload-time = "2026-02-17T16:12:04.725Z" }, + { url = "https://files.pythonhosted.org/packages/a3/c1/a03cc63722339ddbf087485f253493e2b013039f5b707e8e6016141130fa/librt-0.8.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4be2a5c926b9770c9e08e717f05737a269b9d0ebc5d2f0060f0fe3fe9ce47acb", size = 218219, upload-time = "2026-02-17T16:12:05.828Z" }, + { url = "https://files.pythonhosted.org/packages/58/f5/fff6108af0acf941c6f274a946aea0e484bd10cd2dc37610287ce49388c5/librt-0.8.1-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:fd1a720332ea335ceb544cf0a03f81df92abd4bb887679fd1e460976b0e6214b", size = 218750, upload-time = "2026-02-17T16:12:07.09Z" }, + { url = "https://files.pythonhosted.org/packages/71/67/5a387bfef30ec1e4b4f30562c8586566faf87e47d696768c19feb49e3646/librt-0.8.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:93c2af9e01e0ef80d95ae3c720be101227edae5f2fe7e3dc63d8857fadfc5a1d", size = 241624, upload-time = "2026-02-17T16:12:08.43Z" }, + { url = "https://files.pythonhosted.org/packages/d4/be/24f8502db11d405232ac1162eb98069ca49c3306c1d75c6ccc61d9af8789/librt-0.8.1-cp313-cp313-win32.whl", hash = "sha256:086a32dbb71336627e78cc1d6ee305a68d038ef7d4c39aaff41ae8c9aa46e91a", size = 54969, upload-time = "2026-02-17T16:12:09.633Z" }, + { url = 
"https://files.pythonhosted.org/packages/5c/73/c9fdf6cb2a529c1a092ce769a12d88c8cca991194dfe641b6af12fa964d2/librt-0.8.1-cp313-cp313-win_amd64.whl", hash = "sha256:e11769a1dbda4da7b00a76cfffa67aa47cfa66921d2724539eee4b9ede780b79", size = 62000, upload-time = "2026-02-17T16:12:10.632Z" }, + { url = "https://files.pythonhosted.org/packages/d3/97/68f80ca3ac4924f250cdfa6e20142a803e5e50fca96ef5148c52ee8c10ea/librt-0.8.1-cp313-cp313-win_arm64.whl", hash = "sha256:924817ab3141aca17893386ee13261f1d100d1ef410d70afe4389f2359fea4f0", size = 52495, upload-time = "2026-02-17T16:12:11.633Z" }, + { url = "https://files.pythonhosted.org/packages/c9/6a/907ef6800f7bca71b525a05f1839b21f708c09043b1c6aa77b6b827b3996/librt-0.8.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:6cfa7fe54fd4d1f47130017351a959fe5804bda7a0bc7e07a2cdbc3fdd28d34f", size = 66081, upload-time = "2026-02-17T16:12:12.766Z" }, + { url = "https://files.pythonhosted.org/packages/1b/18/25e991cd5640c9fb0f8d91b18797b29066b792f17bf8493da183bf5caabe/librt-0.8.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:228c2409c079f8c11fb2e5d7b277077f694cb93443eb760e00b3b83cb8b3176c", size = 68309, upload-time = "2026-02-17T16:12:13.756Z" }, + { url = "https://files.pythonhosted.org/packages/a4/36/46820d03f058cfb5a9de5940640ba03165ed8aded69e0733c417bb04df34/librt-0.8.1-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:7aae78ab5e3206181780e56912d1b9bb9f90a7249ce12f0e8bf531d0462dd0fc", size = 196804, upload-time = "2026-02-17T16:12:14.818Z" }, + { url = "https://files.pythonhosted.org/packages/59/18/5dd0d3b87b8ff9c061849fbdb347758d1f724b9a82241aa908e0ec54ccd0/librt-0.8.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:172d57ec04346b047ca6af181e1ea4858086c80bdf455f61994c4aa6fc3f866c", size = 206907, upload-time = "2026-02-17T16:12:16.513Z" }, + { url = 
"https://files.pythonhosted.org/packages/d1/96/ef04902aad1424fd7299b62d1890e803e6ab4018c3044dca5922319c4b97/librt-0.8.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6b1977c4ea97ce5eb7755a78fae68d87e4102e4aaf54985e8b56806849cc06a3", size = 221217, upload-time = "2026-02-17T16:12:17.906Z" }, + { url = "https://files.pythonhosted.org/packages/6d/ff/7e01f2dda84a8f5d280637a2e5827210a8acca9a567a54507ef1c75b342d/librt-0.8.1-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:10c42e1f6fd06733ef65ae7bebce2872bcafd8d6e6b0a08fe0a05a23b044fb14", size = 214622, upload-time = "2026-02-17T16:12:19.108Z" }, + { url = "https://files.pythonhosted.org/packages/1e/8c/5b093d08a13946034fed57619742f790faf77058558b14ca36a6e331161e/librt-0.8.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:4c8dfa264b9193c4ee19113c985c95f876fae5e51f731494fc4e0cf594990ba7", size = 221987, upload-time = "2026-02-17T16:12:20.331Z" }, + { url = "https://files.pythonhosted.org/packages/d3/cc/86b0b3b151d40920ad45a94ce0171dec1aebba8a9d72bb3fa00c73ab25dd/librt-0.8.1-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:01170b6729a438f0dedc4a26ed342e3dc4f02d1000b4b19f980e1877f0c297e6", size = 215132, upload-time = "2026-02-17T16:12:21.54Z" }, + { url = "https://files.pythonhosted.org/packages/fc/be/8588164a46edf1e69858d952654e216a9a91174688eeefb9efbb38a9c799/librt-0.8.1-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:7b02679a0d783bdae30d443025b94465d8c3dc512f32f5b5031f93f57ac32071", size = 215195, upload-time = "2026-02-17T16:12:23.073Z" }, + { url = "https://files.pythonhosted.org/packages/f5/f2/0b9279bea735c734d69344ecfe056c1ba211694a72df10f568745c899c76/librt-0.8.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:190b109bb69592a3401fe1ffdea41a2e73370ace2ffdc4a0e8e2b39cdea81b78", size = 237946, upload-time = "2026-02-17T16:12:24.275Z" }, + { url = 
"https://files.pythonhosted.org/packages/e9/cc/5f2a34fbc8aeb35314a3641f9956fa9051a947424652fad9882be7a97949/librt-0.8.1-cp314-cp314-win32.whl", hash = "sha256:e70a57ecf89a0f64c24e37f38d3fe217a58169d2fe6ed6d70554964042474023", size = 50689, upload-time = "2026-02-17T16:12:25.766Z" }, + { url = "https://files.pythonhosted.org/packages/a0/76/cd4d010ab2147339ca2b93e959c3686e964edc6de66ddacc935c325883d7/librt-0.8.1-cp314-cp314-win_amd64.whl", hash = "sha256:7e2f3edca35664499fbb36e4770650c4bd4a08abc1f4458eab9df4ec56389730", size = 57875, upload-time = "2026-02-17T16:12:27.465Z" }, + { url = "https://files.pythonhosted.org/packages/84/0f/2143cb3c3ca48bd3379dcd11817163ca50781927c4537345d608b5045998/librt-0.8.1-cp314-cp314-win_arm64.whl", hash = "sha256:0d2f82168e55ddefd27c01c654ce52379c0750ddc31ee86b4b266bcf4d65f2a3", size = 48058, upload-time = "2026-02-17T16:12:28.556Z" }, + { url = "https://files.pythonhosted.org/packages/d2/0e/9b23a87e37baf00311c3efe6b48d6b6c168c29902dfc3f04c338372fd7db/librt-0.8.1-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:2c74a2da57a094bd48d03fa5d196da83d2815678385d2978657499063709abe1", size = 68313, upload-time = "2026-02-17T16:12:29.659Z" }, + { url = "https://files.pythonhosted.org/packages/db/9a/859c41e5a4f1c84200a7d2b92f586aa27133c8243b6cac9926f6e54d01b9/librt-0.8.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:a355d99c4c0d8e5b770313b8b247411ed40949ca44e33e46a4789b9293a907ee", size = 70994, upload-time = "2026-02-17T16:12:31.516Z" }, + { url = "https://files.pythonhosted.org/packages/4c/28/10605366ee599ed34223ac2bf66404c6fb59399f47108215d16d5ad751a8/librt-0.8.1-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:2eb345e8b33fb748227409c9f1233d4df354d6e54091f0e8fc53acdb2ffedeb7", size = 220770, upload-time = "2026-02-17T16:12:33.294Z" }, + { url = 
"https://files.pythonhosted.org/packages/af/8d/16ed8fd452dafae9c48d17a6bc1ee3e818fd40ef718d149a8eff2c9f4ea2/librt-0.8.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9be2f15e53ce4e83cc08adc29b26fb5978db62ef2a366fbdf716c8a6c8901040", size = 235409, upload-time = "2026-02-17T16:12:35.443Z" }, + { url = "https://files.pythonhosted.org/packages/89/1b/7bdf3e49349c134b25db816e4a3db6b94a47ac69d7d46b1e682c2c4949be/librt-0.8.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:785ae29c1f5c6e7c2cde2c7c0e148147f4503da3abc5d44d482068da5322fd9e", size = 246473, upload-time = "2026-02-17T16:12:36.656Z" }, + { url = "https://files.pythonhosted.org/packages/4e/8a/91fab8e4fd2a24930a17188c7af5380eb27b203d72101c9cc000dbdfd95a/librt-0.8.1-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:1d3a7da44baf692f0c6aeb5b2a09c5e6fc7a703bca9ffa337ddd2e2da53f7732", size = 238866, upload-time = "2026-02-17T16:12:37.849Z" }, + { url = "https://files.pythonhosted.org/packages/b9/e0/c45a098843fc7c07e18a7f8a24ca8496aecbf7bdcd54980c6ca1aaa79a8e/librt-0.8.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5fc48998000cbc39ec0d5311312dda93ecf92b39aaf184c5e817d5d440b29624", size = 250248, upload-time = "2026-02-17T16:12:39.445Z" }, + { url = "https://files.pythonhosted.org/packages/82/30/07627de23036640c952cce0c1fe78972e77d7d2f8fd54fa5ef4554ff4a56/librt-0.8.1-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:e96baa6820280077a78244b2e06e416480ed859bbd8e5d641cf5742919d8beb4", size = 240629, upload-time = "2026-02-17T16:12:40.889Z" }, + { url = "https://files.pythonhosted.org/packages/fb/c1/55bfe1ee3542eba055616f9098eaf6eddb966efb0ca0f44eaa4aba327307/librt-0.8.1-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:31362dbfe297b23590530007062c32c6f6176f6099646bb2c95ab1b00a57c382", size = 239615, upload-time = "2026-02-17T16:12:42.446Z" }, + { url = 
"https://files.pythonhosted.org/packages/2b/39/191d3d28abc26c9099b19852e6c99f7f6d400b82fa5a4e80291bd3803e19/librt-0.8.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:cc3656283d11540ab0ea01978378e73e10002145117055e03722417aeab30994", size = 263001, upload-time = "2026-02-17T16:12:43.627Z" }, + { url = "https://files.pythonhosted.org/packages/b9/eb/7697f60fbe7042ab4e88f4ee6af496b7f222fffb0a4e3593ef1f29f81652/librt-0.8.1-cp314-cp314t-win32.whl", hash = "sha256:738f08021b3142c2918c03692608baed43bc51144c29e35807682f8070ee2a3a", size = 51328, upload-time = "2026-02-17T16:12:45.148Z" }, + { url = "https://files.pythonhosted.org/packages/7c/72/34bf2eb7a15414a23e5e70ecb9440c1d3179f393d9349338a91e2781c0fb/librt-0.8.1-cp314-cp314t-win_amd64.whl", hash = "sha256:89815a22daf9c51884fb5dbe4f1ef65ee6a146e0b6a8df05f753e2e4a9359bf4", size = 58722, upload-time = "2026-02-17T16:12:46.85Z" }, + { url = "https://files.pythonhosted.org/packages/b2/c8/d148e041732d631fc76036f8b30fae4e77b027a1e95b7a84bb522481a940/librt-0.8.1-cp314-cp314t-win_arm64.whl", hash = "sha256:bf512a71a23504ed08103a13c941f763db13fb11177beb3d9244c98c29fb4a61", size = 48755, upload-time = "2026-02-17T16:12:47.943Z" }, +] + +[[package]] +name = "logfire-api" +version = "4.31.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/08/a2/8d5a3c1c282d5f2bd9f5e9ddd5288d1414a53301ce389af9016b6d82bd50/logfire_api-4.31.0.tar.gz", hash = "sha256:fc4b01257ebd4ce297ad374ed201eb1a9213b999f6ae6df45cfca5bd0ef378f8", size = 77838, upload-time = "2026-03-27T19:00:47.545Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/26/27/9372b7492b3e146908d520f8599909311cd930175801ad219171fafc6f3e/logfire_api-4.31.0-py3-none-any.whl", hash = "sha256:3c1f502fd4eb8ef0996427a5cf275fd8f327f38600650a1f53071a8171c812db", size = 123402, upload-time = "2026-03-27T19:00:44.952Z" }, +] + [[package]] name = "markdown" version = "3.10" @@ -576,6 +764,73 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/5b/54/662a4743aa81d9582ee9339d4ffa3c8fd40a4965e033d77b9da9774d3960/mkdocs_material_extensions-1.3.1-py3-none-any.whl", hash = "sha256:adff8b62700b25cb77b53358dad940f3ef973dd6db797907c49e3c2ef3ab4e31", size = 8728, upload-time = "2023-11-22T19:09:43.465Z" }, ] +[[package]] +name = "mypy" +version = "1.20.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "librt", marker = "platform_python_implementation != 'PyPy'" }, + { name = "mypy-extensions" }, + { name = "pathspec" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f8/5c/b0089fe7fef0a994ae5ee07029ced0526082c6cfaaa4c10d40a10e33b097/mypy-1.20.0.tar.gz", hash = "sha256:eb96c84efcc33f0b5e0e04beacf00129dd963b67226b01c00b9dfc8affb464c3", size = 3815028, upload-time = "2026-03-31T16:55:14.959Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4d/a2/a965c8c3fcd4fa8b84ba0d46606181b0d0a1d50f274c67877f3e9ed4882c/mypy-1.20.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d99f515f95fd03a90875fdb2cca12ff074aa04490db4d190905851bdf8a549a8", size = 14430138, upload-time = "2026-03-31T16:52:37.843Z" }, + { url = "https://files.pythonhosted.org/packages/53/6e/043477501deeb8eabbab7f1a2f6cac62cfb631806dc1d6862a04a7f5011b/mypy-1.20.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bd0212976dc57a5bfeede7c219e7cd66568a32c05c9129686dd487c059c1b88a", size = 13311282, upload-time = "2026-03-31T16:55:11.021Z" }, + { url = "https://files.pythonhosted.org/packages/65/aa/bd89b247b83128197a214f29f0632ff3c14f54d4cd70d144d157bd7d7d6e/mypy-1.20.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f8426d4d75d68714abc17a4292d922f6ba2cfb984b72c2278c437f6dae797865", size = 13750889, upload-time = "2026-03-31T16:52:02.909Z" }, + { url = 
"https://files.pythonhosted.org/packages/fa/9d/2860be7355c45247ccc0be1501c91176318964c2a137bd4743f58ce6200e/mypy-1.20.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:02cca0761c75b42a20a2757ae58713276605eb29a08dd8a6e092aa347c4115ca", size = 14619788, upload-time = "2026-03-31T16:50:48.928Z" }, + { url = "https://files.pythonhosted.org/packages/75/7f/3ef3e360c91f3de120f205c8ce405e9caf9fc52ef14b65d37073e322c114/mypy-1.20.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b3a49064504be59e59da664c5e149edc1f26c67c4f8e8456f6ba6aba55033018", size = 14918849, upload-time = "2026-03-31T16:51:10.478Z" }, + { url = "https://files.pythonhosted.org/packages/ae/72/af970dfe167ef788df7c5e6109d2ed0229f164432ce828bc9741a4250e64/mypy-1.20.0-cp310-cp310-win_amd64.whl", hash = "sha256:ebea00201737ad4391142808ed16e875add5c17f676e0912b387739f84991e13", size = 10822007, upload-time = "2026-03-31T16:50:25.268Z" }, + { url = "https://files.pythonhosted.org/packages/93/94/ba9065c2ebe5421619aff684b793d953e438a8bfe31a320dd6d1e0706e81/mypy-1.20.0-cp310-cp310-win_arm64.whl", hash = "sha256:e80cf77847d0d3e6e3111b7b25db32a7f8762fd4b9a3a72ce53fe16a2863b281", size = 9756158, upload-time = "2026-03-31T16:48:36.213Z" }, + { url = "https://files.pythonhosted.org/packages/6e/1c/74cb1d9993236910286865679d1c616b136b2eae468493aa939431eda410/mypy-1.20.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4525e7010b1b38334516181c5b81e16180b8e149e6684cee5a727c78186b4e3b", size = 14343972, upload-time = "2026-03-31T16:49:04.887Z" }, + { url = "https://files.pythonhosted.org/packages/d5/0d/01399515eca280386e308cf57901e68d3a52af18691941b773b3380c1df8/mypy-1.20.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a17c5d0bdcca61ce24a35beb828a2d0d323d3fcf387d7512206888c900193367", size = 13225007, upload-time = "2026-03-31T16:50:08.151Z" }, + { url = 
"https://files.pythonhosted.org/packages/56/ac/b4ba5094fb2d7fe9d2037cd8d18bbe02bcf68fd22ab9ff013f55e57ba095/mypy-1.20.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f75ff57defcd0f1d6e006d721ccdec6c88d4f6a7816eb92f1c4890d979d9ee62", size = 13663752, upload-time = "2026-03-31T16:49:26.064Z" }, + { url = "https://files.pythonhosted.org/packages/db/a7/460678d3cf7da252d2288dad0c602294b6ec22a91932ec368cc11e44bb6e/mypy-1.20.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b503ab55a836136b619b5fc21c8803d810c5b87551af8600b72eecafb0059cb0", size = 14532265, upload-time = "2026-03-31T16:53:55.077Z" }, + { url = "https://files.pythonhosted.org/packages/a3/3e/051cca8166cf0438ae3ea80e0e7c030d7a8ab98dffc93f80a1aa3f23c1a2/mypy-1.20.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1973868d2adbb4584a3835780b27436f06d1dc606af5be09f187aaa25be1070f", size = 14768476, upload-time = "2026-03-31T16:50:34.587Z" }, + { url = "https://files.pythonhosted.org/packages/be/66/8e02ec184f852ed5c4abb805583305db475930854e09964b55e107cdcbc4/mypy-1.20.0-cp311-cp311-win_amd64.whl", hash = "sha256:2fcedb16d456106e545b2bfd7ef9d24e70b38ec252d2a629823a4d07ebcdb69e", size = 10818226, upload-time = "2026-03-31T16:53:15.624Z" }, + { url = "https://files.pythonhosted.org/packages/13/4b/383ad1924b28f41e4879a74151e7a5451123330d45652da359f9183bcd45/mypy-1.20.0-cp311-cp311-win_arm64.whl", hash = "sha256:379edf079ce44ac8d2805bcf9b3dd7340d4f97aad3a5e0ebabbf9d125b84b442", size = 9750091, upload-time = "2026-03-31T16:54:12.162Z" }, + { url = "https://files.pythonhosted.org/packages/be/dd/3afa29b58c2e57c79116ed55d700721c3c3b15955e2b6251dd165d377c0e/mypy-1.20.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:002b613ae19f4ac7d18b7e168ffe1cb9013b37c57f7411984abbd3b817b0a214", size = 14509525, upload-time = "2026-03-31T16:55:01.824Z" }, + { url = 
"https://files.pythonhosted.org/packages/54/eb/227b516ab8cad9f2a13c5e7a98d28cd6aa75e9c83e82776ae6c1c4c046c7/mypy-1.20.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a9336b5e6712f4adaf5afc3203a99a40b379049104349d747eb3e5a3aa23ac2e", size = 13326469, upload-time = "2026-03-31T16:51:41.23Z" }, + { url = "https://files.pythonhosted.org/packages/57/d4/1ddb799860c1b5ac6117ec307b965f65deeb47044395ff01ab793248a591/mypy-1.20.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f13b3e41bce9d257eded794c0f12878af3129d80aacd8a3ee0dee51f3a978651", size = 13705953, upload-time = "2026-03-31T16:48:55.69Z" }, + { url = "https://files.pythonhosted.org/packages/c5/b7/54a720f565a87b893182a2a393370289ae7149e4715859e10e1c05e49154/mypy-1.20.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9804c3ad27f78e54e58b32e7cb532d128b43dbfb9f3f9f06262b821a0f6bd3f5", size = 14710363, upload-time = "2026-03-31T16:53:26.948Z" }, + { url = "https://files.pythonhosted.org/packages/b2/2a/74810274848d061f8a8ea4ac23aaad43bd3d8c1882457999c2e568341c57/mypy-1.20.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:697f102c5c1d526bdd761a69f17c6070f9892eebcb94b1a5963d679288c09e78", size = 14947005, upload-time = "2026-03-31T16:50:17.591Z" }, + { url = "https://files.pythonhosted.org/packages/77/91/21b8ba75f958bcda75690951ce6fa6b7138b03471618959529d74b8544e2/mypy-1.20.0-cp312-cp312-win_amd64.whl", hash = "sha256:0ecd63f75fdd30327e4ad8b5704bd6d91fc6c1b2e029f8ee14705e1207212489", size = 10880616, upload-time = "2026-03-31T16:52:19.986Z" }, + { url = "https://files.pythonhosted.org/packages/8a/15/3d8198ef97c1ca03aea010cce4f1d4f3bc5d9849e8c0140111ca2ead9fdd/mypy-1.20.0-cp312-cp312-win_arm64.whl", hash = "sha256:f194db59657c58593a3c47c6dfd7bad4ef4ac12dbc94d01b3a95521f78177e33", size = 9813091, upload-time = "2026-03-31T16:53:44.385Z" }, + { url = 
"https://files.pythonhosted.org/packages/d6/a7/f64ea7bd592fa431cb597418b6dec4a47f7d0c36325fec7ac67bc8402b94/mypy-1.20.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b20c8b0fd5877abdf402e79a3af987053de07e6fb208c18df6659f708b535134", size = 14485344, upload-time = "2026-03-31T16:49:16.78Z" }, + { url = "https://files.pythonhosted.org/packages/bb/72/8927d84cfc90c6abea6e96663576e2e417589347eb538749a464c4c218a0/mypy-1.20.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:367e5c993ba34d5054d11937d0485ad6dfc60ba760fa326c01090fc256adf15c", size = 13327400, upload-time = "2026-03-31T16:53:08.02Z" }, + { url = "https://files.pythonhosted.org/packages/ab/4a/11ab99f9afa41aa350178d24a7d2da17043228ea10f6456523f64b5a6cf6/mypy-1.20.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f799d9db89fc00446f03281f84a221e50018fc40113a3ba9864b132895619ebe", size = 13706384, upload-time = "2026-03-31T16:52:28.577Z" }, + { url = "https://files.pythonhosted.org/packages/42/79/694ca73979cfb3535ebfe78733844cd5aff2e63304f59bf90585110d975a/mypy-1.20.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:555658c611099455b2da507582ea20d2043dfdfe7f5ad0add472b1c6238b433f", size = 14700378, upload-time = "2026-03-31T16:48:45.527Z" }, + { url = "https://files.pythonhosted.org/packages/84/24/a022ccab3a46e3d2cdf2e0e260648633640eb396c7e75d5a42818a8d3971/mypy-1.20.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:efe8d70949c3023698c3fca1e94527e7e790a361ab8116f90d11221421cd8726", size = 14932170, upload-time = "2026-03-31T16:49:36.038Z" }, + { url = "https://files.pythonhosted.org/packages/d8/9b/549228d88f574d04117e736f55958bd4908f980f9f5700a07aeb85df005b/mypy-1.20.0-cp313-cp313-win_amd64.whl", hash = "sha256:f49590891d2c2f8a9de15614e32e459a794bcba84693c2394291a2038bbaaa69", size = 10888526, upload-time = "2026-03-31T16:50:59.827Z" }, + { url = 
"https://files.pythonhosted.org/packages/91/17/15095c0e54a8bc04d22d4ff06b2139d5f142c2e87520b4e39010c4862771/mypy-1.20.0-cp313-cp313-win_arm64.whl", hash = "sha256:76a70bf840495729be47510856b978f1b0ec7d08f257ca38c9d932720bf6b43e", size = 9816456, upload-time = "2026-03-31T16:49:59.537Z" }, + { url = "https://files.pythonhosted.org/packages/4e/0e/6ca4a84cbed9e62384bc0b2974c90395ece5ed672393e553996501625fc5/mypy-1.20.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:0f42dfaab7ec1baff3b383ad7af562ab0de573c5f6edb44b2dab016082b89948", size = 14483331, upload-time = "2026-03-31T16:52:57.999Z" }, + { url = "https://files.pythonhosted.org/packages/7d/c5/5fe9d8a729dd9605064691816243ae6c49fde0bd28f6e5e17f6a24203c43/mypy-1.20.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:31b5dbb55293c1bd27c0fc813a0d2bb5ceef9d65ac5afa2e58f829dab7921fd5", size = 13342047, upload-time = "2026-03-31T16:54:21.555Z" }, + { url = "https://files.pythonhosted.org/packages/4c/33/e18bcfa338ca4e6b2771c85d4c5203e627d0c69d9de5c1a2cf2ba13320ba/mypy-1.20.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:49d11c6f573a5a08f77fad13faff2139f6d0730ebed2cfa9b3d2702671dd7188", size = 13719585, upload-time = "2026-03-31T16:51:53.89Z" }, + { url = "https://files.pythonhosted.org/packages/6b/8d/93491ff7b79419edc7eabf95cb3b3f7490e2e574b2855c7c7e7394ff933f/mypy-1.20.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7d3243c406773185144527f83be0e0aefc7bf4601b0b2b956665608bf7c98a83", size = 14685075, upload-time = "2026-03-31T16:54:04.464Z" }, + { url = "https://files.pythonhosted.org/packages/b5/9d/d924b38a4923f8d164bf2b4ec98bf13beaf6e10a5348b4b137eadae40a6e/mypy-1.20.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:a79c1eba7ac4209f2d850f0edd0a2f8bba88cbfdfefe6fb76a19e9d4fe5e71a2", size = 14919141, upload-time = "2026-03-31T16:54:51.785Z" }, + { url = 
"https://files.pythonhosted.org/packages/59/98/1da9977016678c0b99d43afe52ed00bb3c1a0c4c995d3e6acca1a6ebb9b4/mypy-1.20.0-cp314-cp314-win_amd64.whl", hash = "sha256:00e047c74d3ec6e71a2eb88e9ea551a2edb90c21f993aefa9e0d2a898e0bb732", size = 11050925, upload-time = "2026-03-31T16:51:30.758Z" }, + { url = "https://files.pythonhosted.org/packages/5e/e3/ba0b7a3143e49a9c4f5967dde6ea4bf8e0b10ecbbcca69af84027160ee89/mypy-1.20.0-cp314-cp314-win_arm64.whl", hash = "sha256:931a7630bba591593dcf6e97224a21ff80fb357e7982628d25e3c618e7f598ef", size = 10001089, upload-time = "2026-03-31T16:49:43.632Z" }, + { url = "https://files.pythonhosted.org/packages/12/28/e617e67b3be9d213cda7277913269c874eb26472489f95d09d89765ce2d8/mypy-1.20.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:26c8b52627b6552f47ff11adb4e1509605f094e29815323e487fc0053ebe93d1", size = 15534710, upload-time = "2026-03-31T16:52:12.506Z" }, + { url = "https://files.pythonhosted.org/packages/6e/0c/3b5f2d3e45dc7169b811adce8451679d9430399d03b168f9b0489f43adaa/mypy-1.20.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:39362cdb4ba5f916e7976fccecaab1ba3a83e35f60fa68b64e9a70e221bb2436", size = 14393013, upload-time = "2026-03-31T16:54:41.186Z" }, + { url = "https://files.pythonhosted.org/packages/a3/49/edc8b0aa145cc09c1c74f7ce2858eead9329931dcbbb26e2ad40906daa4e/mypy-1.20.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:34506397dbf40c15dc567635d18a21d33827e9ab29014fb83d292a8f4f8953b6", size = 15047240, upload-time = "2026-03-31T16:54:31.955Z" }, + { url = "https://files.pythonhosted.org/packages/42/37/a946bb416e37a57fa752b3100fd5ede0e28df94f92366d1716555d47c454/mypy-1.20.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:555493c44a4f5a1b58d611a43333e71a9981c6dbe26270377b6f8174126a0526", size = 15858565, upload-time = "2026-03-31T16:53:36.997Z" }, + { url = 
"https://files.pythonhosted.org/packages/2f/99/7690b5b5b552db1bd4ff362e4c0eb3107b98d680835e65823fbe888c8b78/mypy-1.20.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:2721f0ce49cb74a38f00c50da67cb7d36317b5eda38877a49614dc018e91c787", size = 16087874, upload-time = "2026-03-31T16:52:48.313Z" }, + { url = "https://files.pythonhosted.org/packages/aa/76/53e893a498138066acd28192b77495c9357e5a58cc4be753182846b43315/mypy-1.20.0-cp314-cp314t-win_amd64.whl", hash = "sha256:47781555a7aa5fedcc2d16bcd72e0dc83eb272c10dd657f9fb3f9cc08e2e6abb", size = 12572380, upload-time = "2026-03-31T16:49:52.454Z" }, + { url = "https://files.pythonhosted.org/packages/76/9c/6dbdae21f01b7aacddc2c0bbf3c5557aa547827fdf271770fe1e521e7093/mypy-1.20.0-cp314-cp314t-win_arm64.whl", hash = "sha256:c70380fe5d64010f79fb863b9081c7004dd65225d2277333c219d93a10dad4dd", size = 10381174, upload-time = "2026-03-31T16:51:20.179Z" }, + { url = "https://files.pythonhosted.org/packages/21/66/4d734961ce167f0fd8380769b3b7c06dbdd6ff54c2190f3f2ecd22528158/mypy-1.20.0-py3-none-any.whl", hash = "sha256:a6e0641147cbfa7e4e94efdb95c2dab1aff8cfc159ded13e07f308ddccc8c48e", size = 2636365, upload-time = "2026-03-31T16:51:44.911Z" }, +] + +[[package]] +name = "mypy-extensions" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/6e/371856a3fb9d31ca8dac321cda606860fa4548858c0cc45d9d1d4ca2628b/mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558", size = 6343, upload-time = "2025-04-22T14:54:24.164Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" }, +] + [[package]] name = "nodeenv" version = "1.9.1" @@ -585,6 +840,19 @@ wheels = [ 
{ url = "https://files.pythonhosted.org/packages/d2/1d/1b658dbd2b9fa9c4c9f32accbfc0205d532c8c6194dc0f2a4c0428e7128a/nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9", size = 22314, upload-time = "2024-06-04T18:44:08.352Z" }, ] +[[package]] +name = "opentelemetry-api" +version = "1.40.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "importlib-metadata" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2c/1d/4049a9e8698361cc1a1aa03a6c59e4fa4c71e0c0f94a30f988a6876a2ae6/opentelemetry_api-1.40.0.tar.gz", hash = "sha256:159be641c0b04d11e9ecd576906462773eb97ae1b657730f0ecf64d32071569f", size = 70851, upload-time = "2026-03-04T14:17:21.555Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5f/bf/93795954016c522008da367da292adceed71cca6ee1717e1d64c83089099/opentelemetry_api-1.40.0-py3-none-any.whl", hash = "sha256:82dd69331ae74b06f6a874704be0cfaa49a1650e1537d4a813b86ecef7d0ecf9", size = 68676, upload-time = "2026-03-04T14:17:01.24Z" }, +] + [[package]] name = "packaging" version = "24.2" @@ -605,11 +873,11 @@ wheels = [ [[package]] name = "pathspec" -version = "0.12.1" +version = "1.0.4" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ca/bc/f35b8446f4531a7cb215605d100cd88b7ac6f44ab3fc94870c120ab3adbf/pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712", size = 51043, upload-time = "2023-12-10T22:30:45Z" } +sdist = { url = "https://files.pythonhosted.org/packages/fa/36/e27608899f9b8d4dff0617b2d9ab17ca5608956ca44461ac14ac48b44015/pathspec-1.0.4.tar.gz", hash = "sha256:0210e2ae8a21a9137c0d470578cb0e595af87edaa6ebf12ff176f14a02e0e645", size = 131200, upload-time = "2026-01-27T03:59:46.938Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191, upload-time = "2023-12-10T22:30:43.14Z" }, + { url = "https://files.pythonhosted.org/packages/ef/3c/2c197d226f9ea224a9ab8d197933f9da0ae0aac5b6e0f884e2b8d9c8e9f7/pathspec-1.0.4-py3-none-any.whl", hash = "sha256:fb6ae2fd4e7c921a165808a552060e722767cfa526f99ca5156ed2ce45a5c723", size = 55206, upload-time = "2026-01-27T03:59:45.137Z" }, ] [[package]] @@ -648,13 +916,16 @@ wheels = [ [[package]] name = "protest" -version = "0.1.1" +version = "0.1.2" source = { editable = "." } dependencies = [ { name = "typing-extensions" }, ] [package.optional-dependencies] +evals = [ + { name = "pydantic-evals" }, +] rich = [ { name = "rich" }, ] @@ -666,6 +937,7 @@ web = [ dev = [ { name = "jsonschema" }, { name = "mkdocs-material" }, + { name = "mypy" }, { name = "pre-commit" }, { name = "pytest" }, { name = "pytest-asyncio" }, @@ -681,16 +953,18 @@ docs = [ [package.metadata] requires-dist = [ + { name = "pydantic-evals", marker = "extra == 'evals'", specifier = ">=0.1" }, { name = "rich", marker = "extra == 'rich'", specifier = ">=13.0" }, { name = "typing-extensions", specifier = ">=4.15.0" }, { name = "websockets", marker = "extra == 'web'", specifier = ">=12.0" }, ] -provides-extras = ["rich", "web"] +provides-extras = ["rich", "web", "evals"] [package.metadata.requires-dev] dev = [ { name = "jsonschema", specifier = ">=4.0.0" }, { name = "mkdocs-material", specifier = ">=9.7.0" }, + { name = "mypy", specifier = ">=1.0" }, { name = "pre-commit", specifier = ">=4.5.0" }, { name = "pytest", specifier = ">=9.0.1" }, { name = "pytest-asyncio", specifier = ">=1.3.0" }, @@ -704,6 +978,190 @@ docs = [ { name = "mkdocs-material", specifier = ">=9.7.0" }, ] +[[package]] +name = "pydantic" +version = "2.12.5" +source = { registry = 
"https://pypi.org/simple" } +dependencies = [ + { name = "annotated-types" }, + { name = "pydantic-core" }, + { name = "typing-extensions" }, + { name = "typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/69/44/36f1a6e523abc58ae5f928898e4aca2e0ea509b5aa6f6f392a5d882be928/pydantic-2.12.5.tar.gz", hash = "sha256:4d351024c75c0f085a9febbb665ce8c0c6ec5d30e903bdb6394b7ede26aebb49", size = 821591, upload-time = "2025-11-26T15:11:46.471Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/87/b70ad306ebb6f9b585f114d0ac2137d792b48be34d732d60e597c2f8465a/pydantic-2.12.5-py3-none-any.whl", hash = "sha256:e561593fccf61e8a20fc46dfc2dfe075b8be7d0188df33f221ad1f0139180f9d", size = 463580, upload-time = "2025-11-26T15:11:44.605Z" }, +] + +[[package]] +name = "pydantic-ai-slim" +version = "1.73.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "genai-prices" }, + { name = "griffelib" }, + { name = "httpx" }, + { name = "opentelemetry-api" }, + { name = "pydantic" }, + { name = "pydantic-graph" }, + { name = "typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6a/1b/a5e18c7c721a3cfce5b17f86cb99e4142fcb70f38ea6d2b8963c2df445e1/pydantic_ai_slim-1.73.0.tar.gz", hash = "sha256:758d5bedb4b4f484c433672639bfc87af216a38453b1539ae10928a9ca62ff62", size = 497208, upload-time = "2026-03-27T03:49:49.459Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/3b/6aa1874cd0ccbc83c17c8eb308834bf004c8d4344c27cd8048851d4b284d/pydantic_ai_slim-1.73.0-py3-none-any.whl", hash = "sha256:f7176ce6c78539e1070d7e22549186862c2f6e6ea8b05b3aaad8a1942ba1ff4f", size = 638701, upload-time = "2026-03-27T03:49:42.804Z" }, +] + +[[package]] +name = "pydantic-core" +version = "2.41.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/71/70/23b021c950c2addd24ec408e9ab05d59b035b39d97cdc1130e1bce647bb6/pydantic_core-2.41.5.tar.gz", hash = "sha256:08daa51ea16ad373ffd5e7606252cc32f07bc72b28284b6bc9c6df804816476e", size = 460952, upload-time = "2025-11-04T13:43:49.098Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c6/90/32c9941e728d564b411d574d8ee0cf09b12ec978cb22b294995bae5549a5/pydantic_core-2.41.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:77b63866ca88d804225eaa4af3e664c5faf3568cea95360d21f4725ab6e07146", size = 2107298, upload-time = "2025-11-04T13:39:04.116Z" }, + { url = "https://files.pythonhosted.org/packages/fb/a8/61c96a77fe28993d9a6fb0f4127e05430a267b235a124545d79fea46dd65/pydantic_core-2.41.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dfa8a0c812ac681395907e71e1274819dec685fec28273a28905df579ef137e2", size = 1901475, upload-time = "2025-11-04T13:39:06.055Z" }, + { url = "https://files.pythonhosted.org/packages/5d/b6/338abf60225acc18cdc08b4faef592d0310923d19a87fba1faf05af5346e/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5921a4d3ca3aee735d9fd163808f5e8dd6c6972101e4adbda9a4667908849b97", size = 1918815, upload-time = "2025-11-04T13:39:10.41Z" }, + { url = "https://files.pythonhosted.org/packages/d1/1c/2ed0433e682983d8e8cba9c8d8ef274d4791ec6a6f24c58935b90e780e0a/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25c479382d26a2a41b7ebea1043564a937db462816ea07afa8a44c0866d52f9", size = 2065567, upload-time = "2025-11-04T13:39:12.244Z" }, + { url = "https://files.pythonhosted.org/packages/b3/24/cf84974ee7d6eae06b9e63289b7b8f6549d416b5c199ca2d7ce13bbcf619/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f547144f2966e1e16ae626d8ce72b4cfa0caedc7fa28052001c94fb2fcaa1c52", size = 2230442, upload-time = "2025-11-04T13:39:13.962Z" }, + { url = 
"https://files.pythonhosted.org/packages/fd/21/4e287865504b3edc0136c89c9c09431be326168b1eb7841911cbc877a995/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f52298fbd394f9ed112d56f3d11aabd0d5bd27beb3084cc3d8ad069483b8941", size = 2350956, upload-time = "2025-11-04T13:39:15.889Z" }, + { url = "https://files.pythonhosted.org/packages/a8/76/7727ef2ffa4b62fcab916686a68a0426b9b790139720e1934e8ba797e238/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:100baa204bb412b74fe285fb0f3a385256dad1d1879f0a5cb1499ed2e83d132a", size = 2068253, upload-time = "2025-11-04T13:39:17.403Z" }, + { url = "https://files.pythonhosted.org/packages/d5/8c/a4abfc79604bcb4c748e18975c44f94f756f08fb04218d5cb87eb0d3a63e/pydantic_core-2.41.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:05a2c8852530ad2812cb7914dc61a1125dc4e06252ee98e5638a12da6cc6fb6c", size = 2177050, upload-time = "2025-11-04T13:39:19.351Z" }, + { url = "https://files.pythonhosted.org/packages/67/b1/de2e9a9a79b480f9cb0b6e8b6ba4c50b18d4e89852426364c66aa82bb7b3/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:29452c56df2ed968d18d7e21f4ab0ac55e71dc59524872f6fc57dcf4a3249ed2", size = 2147178, upload-time = "2025-11-04T13:39:21Z" }, + { url = "https://files.pythonhosted.org/packages/16/c1/dfb33f837a47b20417500efaa0378adc6635b3c79e8369ff7a03c494b4ac/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:d5160812ea7a8a2ffbe233d8da666880cad0cbaf5d4de74ae15c313213d62556", size = 2341833, upload-time = "2025-11-04T13:39:22.606Z" }, + { url = "https://files.pythonhosted.org/packages/47/36/00f398642a0f4b815a9a558c4f1dca1b4020a7d49562807d7bc9ff279a6c/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:df3959765b553b9440adfd3c795617c352154e497a4eaf3752555cfb5da8fc49", size = 2321156, upload-time = "2025-11-04T13:39:25.843Z" }, + { url = 
"https://files.pythonhosted.org/packages/7e/70/cad3acd89fde2010807354d978725ae111ddf6d0ea46d1ea1775b5c1bd0c/pydantic_core-2.41.5-cp310-cp310-win32.whl", hash = "sha256:1f8d33a7f4d5a7889e60dc39856d76d09333d8a6ed0f5f1190635cbec70ec4ba", size = 1989378, upload-time = "2025-11-04T13:39:27.92Z" }, + { url = "https://files.pythonhosted.org/packages/76/92/d338652464c6c367e5608e4488201702cd1cbb0f33f7b6a85a60fe5f3720/pydantic_core-2.41.5-cp310-cp310-win_amd64.whl", hash = "sha256:62de39db01b8d593e45871af2af9e497295db8d73b085f6bfd0b18c83c70a8f9", size = 2013622, upload-time = "2025-11-04T13:39:29.848Z" }, + { url = "https://files.pythonhosted.org/packages/e8/72/74a989dd9f2084b3d9530b0915fdda64ac48831c30dbf7c72a41a5232db8/pydantic_core-2.41.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a3a52f6156e73e7ccb0f8cced536adccb7042be67cb45f9562e12b319c119da6", size = 2105873, upload-time = "2025-11-04T13:39:31.373Z" }, + { url = "https://files.pythonhosted.org/packages/12/44/37e403fd9455708b3b942949e1d7febc02167662bf1a7da5b78ee1ea2842/pydantic_core-2.41.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7f3bf998340c6d4b0c9a2f02d6a400e51f123b59565d74dc60d252ce888c260b", size = 1899826, upload-time = "2025-11-04T13:39:32.897Z" }, + { url = "https://files.pythonhosted.org/packages/33/7f/1d5cab3ccf44c1935a359d51a8a2a9e1a654b744b5e7f80d41b88d501eec/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:378bec5c66998815d224c9ca994f1e14c0c21cb95d2f52b6021cc0b2a58f2a5a", size = 1917869, upload-time = "2025-11-04T13:39:34.469Z" }, + { url = "https://files.pythonhosted.org/packages/6e/6a/30d94a9674a7fe4f4744052ed6c5e083424510be1e93da5bc47569d11810/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e7b576130c69225432866fe2f4a469a85a54ade141d96fd396dffcf607b558f8", size = 2063890, upload-time = "2025-11-04T13:39:36.053Z" }, + { url = 
"https://files.pythonhosted.org/packages/50/be/76e5d46203fcb2750e542f32e6c371ffa9b8ad17364cf94bb0818dbfb50c/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6cb58b9c66f7e4179a2d5e0f849c48eff5c1fca560994d6eb6543abf955a149e", size = 2229740, upload-time = "2025-11-04T13:39:37.753Z" }, + { url = "https://files.pythonhosted.org/packages/d3/ee/fed784df0144793489f87db310a6bbf8118d7b630ed07aa180d6067e653a/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88942d3a3dff3afc8288c21e565e476fc278902ae4d6d134f1eeda118cc830b1", size = 2350021, upload-time = "2025-11-04T13:39:40.94Z" }, + { url = "https://files.pythonhosted.org/packages/c8/be/8fed28dd0a180dca19e72c233cbf58efa36df055e5b9d90d64fd1740b828/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f31d95a179f8d64d90f6831d71fa93290893a33148d890ba15de25642c5d075b", size = 2066378, upload-time = "2025-11-04T13:39:42.523Z" }, + { url = "https://files.pythonhosted.org/packages/b0/3b/698cf8ae1d536a010e05121b4958b1257f0b5522085e335360e53a6b1c8b/pydantic_core-2.41.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c1df3d34aced70add6f867a8cf413e299177e0c22660cc767218373d0779487b", size = 2175761, upload-time = "2025-11-04T13:39:44.553Z" }, + { url = "https://files.pythonhosted.org/packages/b8/ba/15d537423939553116dea94ce02f9c31be0fa9d0b806d427e0308ec17145/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4009935984bd36bd2c774e13f9a09563ce8de4abaa7226f5108262fa3e637284", size = 2146303, upload-time = "2025-11-04T13:39:46.238Z" }, + { url = "https://files.pythonhosted.org/packages/58/7f/0de669bf37d206723795f9c90c82966726a2ab06c336deba4735b55af431/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:34a64bc3441dc1213096a20fe27e8e128bd3ff89921706e83c0b1ac971276594", size = 2340355, upload-time = "2025-11-04T13:39:48.002Z" }, + { url = 
"https://files.pythonhosted.org/packages/e5/de/e7482c435b83d7e3c3ee5ee4451f6e8973cff0eb6007d2872ce6383f6398/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c9e19dd6e28fdcaa5a1de679aec4141f691023916427ef9bae8584f9c2fb3b0e", size = 2319875, upload-time = "2025-11-04T13:39:49.705Z" }, + { url = "https://files.pythonhosted.org/packages/fe/e6/8c9e81bb6dd7560e33b9053351c29f30c8194b72f2d6932888581f503482/pydantic_core-2.41.5-cp311-cp311-win32.whl", hash = "sha256:2c010c6ded393148374c0f6f0bf89d206bf3217f201faa0635dcd56bd1520f6b", size = 1987549, upload-time = "2025-11-04T13:39:51.842Z" }, + { url = "https://files.pythonhosted.org/packages/11/66/f14d1d978ea94d1bc21fc98fcf570f9542fe55bfcc40269d4e1a21c19bf7/pydantic_core-2.41.5-cp311-cp311-win_amd64.whl", hash = "sha256:76ee27c6e9c7f16f47db7a94157112a2f3a00e958bc626e2f4ee8bec5c328fbe", size = 2011305, upload-time = "2025-11-04T13:39:53.485Z" }, + { url = "https://files.pythonhosted.org/packages/56/d8/0e271434e8efd03186c5386671328154ee349ff0354d83c74f5caaf096ed/pydantic_core-2.41.5-cp311-cp311-win_arm64.whl", hash = "sha256:4bc36bbc0b7584de96561184ad7f012478987882ebf9f9c389b23f432ea3d90f", size = 1972902, upload-time = "2025-11-04T13:39:56.488Z" }, + { url = "https://files.pythonhosted.org/packages/5f/5d/5f6c63eebb5afee93bcaae4ce9a898f3373ca23df3ccaef086d0233a35a7/pydantic_core-2.41.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f41a7489d32336dbf2199c8c0a215390a751c5b014c2c1c5366e817202e9cdf7", size = 2110990, upload-time = "2025-11-04T13:39:58.079Z" }, + { url = "https://files.pythonhosted.org/packages/aa/32/9c2e8ccb57c01111e0fd091f236c7b371c1bccea0fa85247ac55b1e2b6b6/pydantic_core-2.41.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:070259a8818988b9a84a449a2a7337c7f430a22acc0859c6b110aa7212a6d9c0", size = 1896003, upload-time = "2025-11-04T13:39:59.956Z" }, + { url = 
"https://files.pythonhosted.org/packages/68/b8/a01b53cb0e59139fbc9e4fda3e9724ede8de279097179be4ff31f1abb65a/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e96cea19e34778f8d59fe40775a7a574d95816eb150850a85a7a4c8f4b94ac69", size = 1919200, upload-time = "2025-11-04T13:40:02.241Z" }, + { url = "https://files.pythonhosted.org/packages/38/de/8c36b5198a29bdaade07b5985e80a233a5ac27137846f3bc2d3b40a47360/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed2e99c456e3fadd05c991f8f437ef902e00eedf34320ba2b0842bd1c3ca3a75", size = 2052578, upload-time = "2025-11-04T13:40:04.401Z" }, + { url = "https://files.pythonhosted.org/packages/00/b5/0e8e4b5b081eac6cb3dbb7e60a65907549a1ce035a724368c330112adfdd/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65840751b72fbfd82c3c640cff9284545342a4f1eb1586ad0636955b261b0b05", size = 2208504, upload-time = "2025-11-04T13:40:06.072Z" }, + { url = "https://files.pythonhosted.org/packages/77/56/87a61aad59c7c5b9dc8caad5a41a5545cba3810c3e828708b3d7404f6cef/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e536c98a7626a98feb2d3eaf75944ef6f3dbee447e1f841eae16f2f0a72d8ddc", size = 2335816, upload-time = "2025-11-04T13:40:07.835Z" }, + { url = "https://files.pythonhosted.org/packages/0d/76/941cc9f73529988688a665a5c0ecff1112b3d95ab48f81db5f7606f522d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eceb81a8d74f9267ef4081e246ffd6d129da5d87e37a77c9bde550cb04870c1c", size = 2075366, upload-time = "2025-11-04T13:40:09.804Z" }, + { url = "https://files.pythonhosted.org/packages/d3/43/ebef01f69baa07a482844faaa0a591bad1ef129253ffd0cdaa9d8a7f72d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d38548150c39b74aeeb0ce8ee1d8e82696f4a4e16ddc6de7b1d8823f7de4b9b5", size = 2171698, 
upload-time = "2025-11-04T13:40:12.004Z" }, + { url = "https://files.pythonhosted.org/packages/b1/87/41f3202e4193e3bacfc2c065fab7706ebe81af46a83d3e27605029c1f5a6/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c23e27686783f60290e36827f9c626e63154b82b116d7fe9adba1fda36da706c", size = 2132603, upload-time = "2025-11-04T13:40:13.868Z" }, + { url = "https://files.pythonhosted.org/packages/49/7d/4c00df99cb12070b6bccdef4a195255e6020a550d572768d92cc54dba91a/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:482c982f814460eabe1d3bb0adfdc583387bd4691ef00b90575ca0d2b6fe2294", size = 2329591, upload-time = "2025-11-04T13:40:15.672Z" }, + { url = "https://files.pythonhosted.org/packages/cc/6a/ebf4b1d65d458f3cda6a7335d141305dfa19bdc61140a884d165a8a1bbc7/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bfea2a5f0b4d8d43adf9d7b8bf019fb46fdd10a2e5cde477fbcb9d1fa08c68e1", size = 2319068, upload-time = "2025-11-04T13:40:17.532Z" }, + { url = "https://files.pythonhosted.org/packages/49/3b/774f2b5cd4192d5ab75870ce4381fd89cf218af999515baf07e7206753f0/pydantic_core-2.41.5-cp312-cp312-win32.whl", hash = "sha256:b74557b16e390ec12dca509bce9264c3bbd128f8a2c376eaa68003d7f327276d", size = 1985908, upload-time = "2025-11-04T13:40:19.309Z" }, + { url = "https://files.pythonhosted.org/packages/86/45/00173a033c801cacf67c190fef088789394feaf88a98a7035b0e40d53dc9/pydantic_core-2.41.5-cp312-cp312-win_amd64.whl", hash = "sha256:1962293292865bca8e54702b08a4f26da73adc83dd1fcf26fbc875b35d81c815", size = 2020145, upload-time = "2025-11-04T13:40:21.548Z" }, + { url = "https://files.pythonhosted.org/packages/f9/22/91fbc821fa6d261b376a3f73809f907cec5ca6025642c463d3488aad22fb/pydantic_core-2.41.5-cp312-cp312-win_arm64.whl", hash = "sha256:1746d4a3d9a794cacae06a5eaaccb4b8643a131d45fbc9af23e353dc0a5ba5c3", size = 1976179, upload-time = "2025-11-04T13:40:23.393Z" }, + { url = 
"https://files.pythonhosted.org/packages/87/06/8806241ff1f70d9939f9af039c6c35f2360cf16e93c2ca76f184e76b1564/pydantic_core-2.41.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:941103c9be18ac8daf7b7adca8228f8ed6bb7a1849020f643b3a14d15b1924d9", size = 2120403, upload-time = "2025-11-04T13:40:25.248Z" }, + { url = "https://files.pythonhosted.org/packages/94/02/abfa0e0bda67faa65fef1c84971c7e45928e108fe24333c81f3bfe35d5f5/pydantic_core-2.41.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:112e305c3314f40c93998e567879e887a3160bb8689ef3d2c04b6cc62c33ac34", size = 1896206, upload-time = "2025-11-04T13:40:27.099Z" }, + { url = "https://files.pythonhosted.org/packages/15/df/a4c740c0943e93e6500f9eb23f4ca7ec9bf71b19e608ae5b579678c8d02f/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbaad15cb0c90aa221d43c00e77bb33c93e8d36e0bf74760cd00e732d10a6a0", size = 1919307, upload-time = "2025-11-04T13:40:29.806Z" }, + { url = "https://files.pythonhosted.org/packages/9a/e3/6324802931ae1d123528988e0e86587c2072ac2e5394b4bc2bc34b61ff6e/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:03ca43e12fab6023fc79d28ca6b39b05f794ad08ec2feccc59a339b02f2b3d33", size = 2063258, upload-time = "2025-11-04T13:40:33.544Z" }, + { url = "https://files.pythonhosted.org/packages/c9/d4/2230d7151d4957dd79c3044ea26346c148c98fbf0ee6ebd41056f2d62ab5/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc799088c08fa04e43144b164feb0c13f9a0bc40503f8df3e9fde58a3c0c101e", size = 2214917, upload-time = "2025-11-04T13:40:35.479Z" }, + { url = "https://files.pythonhosted.org/packages/e6/9f/eaac5df17a3672fef0081b6c1bb0b82b33ee89aa5cec0d7b05f52fd4a1fa/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97aeba56665b4c3235a0e52b2c2f5ae9cd071b8a8310ad27bddb3f7fb30e9aa2", size = 2332186, upload-time = "2025-11-04T13:40:37.436Z" }, + { 
url = "https://files.pythonhosted.org/packages/cf/4e/35a80cae583a37cf15604b44240e45c05e04e86f9cfd766623149297e971/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:406bf18d345822d6c21366031003612b9c77b3e29ffdb0f612367352aab7d586", size = 2073164, upload-time = "2025-11-04T13:40:40.289Z" }, + { url = "https://files.pythonhosted.org/packages/bf/e3/f6e262673c6140dd3305d144d032f7bd5f7497d3871c1428521f19f9efa2/pydantic_core-2.41.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b93590ae81f7010dbe380cdeab6f515902ebcbefe0b9327cc4804d74e93ae69d", size = 2179146, upload-time = "2025-11-04T13:40:42.809Z" }, + { url = "https://files.pythonhosted.org/packages/75/c7/20bd7fc05f0c6ea2056a4565c6f36f8968c0924f19b7d97bbfea55780e73/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:01a3d0ab748ee531f4ea6c3e48ad9dac84ddba4b0d82291f87248f2f9de8d740", size = 2137788, upload-time = "2025-11-04T13:40:44.752Z" }, + { url = "https://files.pythonhosted.org/packages/3a/8d/34318ef985c45196e004bc46c6eab2eda437e744c124ef0dbe1ff2c9d06b/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:6561e94ba9dacc9c61bce40e2d6bdc3bfaa0259d3ff36ace3b1e6901936d2e3e", size = 2340133, upload-time = "2025-11-04T13:40:46.66Z" }, + { url = "https://files.pythonhosted.org/packages/9c/59/013626bf8c78a5a5d9350d12e7697d3d4de951a75565496abd40ccd46bee/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:915c3d10f81bec3a74fbd4faebe8391013ba61e5a1a8d48c4455b923bdda7858", size = 2324852, upload-time = "2025-11-04T13:40:48.575Z" }, + { url = "https://files.pythonhosted.org/packages/1a/d9/c248c103856f807ef70c18a4f986693a46a8ffe1602e5d361485da502d20/pydantic_core-2.41.5-cp313-cp313-win32.whl", hash = "sha256:650ae77860b45cfa6e2cdafc42618ceafab3a2d9a3811fcfbd3bbf8ac3c40d36", size = 1994679, upload-time = "2025-11-04T13:40:50.619Z" }, + { url = 
"https://files.pythonhosted.org/packages/9e/8b/341991b158ddab181cff136acd2552c9f35bd30380422a639c0671e99a91/pydantic_core-2.41.5-cp313-cp313-win_amd64.whl", hash = "sha256:79ec52ec461e99e13791ec6508c722742ad745571f234ea6255bed38c6480f11", size = 2019766, upload-time = "2025-11-04T13:40:52.631Z" }, + { url = "https://files.pythonhosted.org/packages/73/7d/f2f9db34af103bea3e09735bb40b021788a5e834c81eedb541991badf8f5/pydantic_core-2.41.5-cp313-cp313-win_arm64.whl", hash = "sha256:3f84d5c1b4ab906093bdc1ff10484838aca54ef08de4afa9de0f5f14d69639cd", size = 1981005, upload-time = "2025-11-04T13:40:54.734Z" }, + { url = "https://files.pythonhosted.org/packages/ea/28/46b7c5c9635ae96ea0fbb779e271a38129df2550f763937659ee6c5dbc65/pydantic_core-2.41.5-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:3f37a19d7ebcdd20b96485056ba9e8b304e27d9904d233d7b1015db320e51f0a", size = 2119622, upload-time = "2025-11-04T13:40:56.68Z" }, + { url = "https://files.pythonhosted.org/packages/74/1a/145646e5687e8d9a1e8d09acb278c8535ebe9e972e1f162ed338a622f193/pydantic_core-2.41.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1d1d9764366c73f996edd17abb6d9d7649a7eb690006ab6adbda117717099b14", size = 1891725, upload-time = "2025-11-04T13:40:58.807Z" }, + { url = "https://files.pythonhosted.org/packages/23/04/e89c29e267b8060b40dca97bfc64a19b2a3cf99018167ea1677d96368273/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e1c2af0fce638d5f1988b686f3b3ea8cd7de5f244ca147c777769e798a9cd1", size = 1915040, upload-time = "2025-11-04T13:41:00.853Z" }, + { url = "https://files.pythonhosted.org/packages/84/a3/15a82ac7bd97992a82257f777b3583d3e84bdb06ba6858f745daa2ec8a85/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:506d766a8727beef16b7adaeb8ee6217c64fc813646b424d0804d67c16eddb66", size = 2063691, upload-time = "2025-11-04T13:41:03.504Z" }, + { url = 
"https://files.pythonhosted.org/packages/74/9b/0046701313c6ef08c0c1cf0e028c67c770a4e1275ca73131563c5f2a310a/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4819fa52133c9aa3c387b3328f25c1facc356491e6135b459f1de698ff64d869", size = 2213897, upload-time = "2025-11-04T13:41:05.804Z" }, + { url = "https://files.pythonhosted.org/packages/8a/cd/6bac76ecd1b27e75a95ca3a9a559c643b3afcd2dd62086d4b7a32a18b169/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b761d210c9ea91feda40d25b4efe82a1707da2ef62901466a42492c028553a2", size = 2333302, upload-time = "2025-11-04T13:41:07.809Z" }, + { url = "https://files.pythonhosted.org/packages/4c/d2/ef2074dc020dd6e109611a8be4449b98cd25e1b9b8a303c2f0fca2f2bcf7/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22f0fb8c1c583a3b6f24df2470833b40207e907b90c928cc8d3594b76f874375", size = 2064877, upload-time = "2025-11-04T13:41:09.827Z" }, + { url = "https://files.pythonhosted.org/packages/18/66/e9db17a9a763d72f03de903883c057b2592c09509ccfe468187f2a2eef29/pydantic_core-2.41.5-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2782c870e99878c634505236d81e5443092fba820f0373997ff75f90f68cd553", size = 2180680, upload-time = "2025-11-04T13:41:12.379Z" }, + { url = "https://files.pythonhosted.org/packages/d3/9e/3ce66cebb929f3ced22be85d4c2399b8e85b622db77dad36b73c5387f8f8/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:0177272f88ab8312479336e1d777f6b124537d47f2123f89cb37e0accea97f90", size = 2138960, upload-time = "2025-11-04T13:41:14.627Z" }, + { url = "https://files.pythonhosted.org/packages/a6/62/205a998f4327d2079326b01abee48e502ea739d174f0a89295c481a2272e/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:63510af5e38f8955b8ee5687740d6ebf7c2a0886d15a6d65c32814613681bc07", size = 2339102, upload-time = "2025-11-04T13:41:16.868Z" }, + { url 
= "https://files.pythonhosted.org/packages/3c/0d/f05e79471e889d74d3d88f5bd20d0ed189ad94c2423d81ff8d0000aab4ff/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:e56ba91f47764cc14f1daacd723e3e82d1a89d783f0f5afe9c364b8bb491ccdb", size = 2326039, upload-time = "2025-11-04T13:41:18.934Z" }, + { url = "https://files.pythonhosted.org/packages/ec/e1/e08a6208bb100da7e0c4b288eed624a703f4d129bde2da475721a80cab32/pydantic_core-2.41.5-cp314-cp314-win32.whl", hash = "sha256:aec5cf2fd867b4ff45b9959f8b20ea3993fc93e63c7363fe6851424c8a7e7c23", size = 1995126, upload-time = "2025-11-04T13:41:21.418Z" }, + { url = "https://files.pythonhosted.org/packages/48/5d/56ba7b24e9557f99c9237e29f5c09913c81eeb2f3217e40e922353668092/pydantic_core-2.41.5-cp314-cp314-win_amd64.whl", hash = "sha256:8e7c86f27c585ef37c35e56a96363ab8de4e549a95512445b85c96d3e2f7c1bf", size = 2015489, upload-time = "2025-11-04T13:41:24.076Z" }, + { url = "https://files.pythonhosted.org/packages/4e/bb/f7a190991ec9e3e0ba22e4993d8755bbc4a32925c0b5b42775c03e8148f9/pydantic_core-2.41.5-cp314-cp314-win_arm64.whl", hash = "sha256:e672ba74fbc2dc8eea59fb6d4aed6845e6905fc2a8afe93175d94a83ba2a01a0", size = 1977288, upload-time = "2025-11-04T13:41:26.33Z" }, + { url = "https://files.pythonhosted.org/packages/92/ed/77542d0c51538e32e15afe7899d79efce4b81eee631d99850edc2f5e9349/pydantic_core-2.41.5-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:8566def80554c3faa0e65ac30ab0932b9e3a5cd7f8323764303d468e5c37595a", size = 2120255, upload-time = "2025-11-04T13:41:28.569Z" }, + { url = "https://files.pythonhosted.org/packages/bb/3d/6913dde84d5be21e284439676168b28d8bbba5600d838b9dca99de0fad71/pydantic_core-2.41.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b80aa5095cd3109962a298ce14110ae16b8c1aece8b72f9dafe81cf597ad80b3", size = 1863760, upload-time = "2025-11-04T13:41:31.055Z" }, + { url = 
"https://files.pythonhosted.org/packages/5a/f0/e5e6b99d4191da102f2b0eb9687aaa7f5bea5d9964071a84effc3e40f997/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3006c3dd9ba34b0c094c544c6006cc79e87d8612999f1a5d43b769b89181f23c", size = 1878092, upload-time = "2025-11-04T13:41:33.21Z" }, + { url = "https://files.pythonhosted.org/packages/71/48/36fb760642d568925953bcc8116455513d6e34c4beaa37544118c36aba6d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:72f6c8b11857a856bcfa48c86f5368439f74453563f951e473514579d44aa612", size = 2053385, upload-time = "2025-11-04T13:41:35.508Z" }, + { url = "https://files.pythonhosted.org/packages/20/25/92dc684dd8eb75a234bc1c764b4210cf2646479d54b47bf46061657292a8/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5cb1b2f9742240e4bb26b652a5aeb840aa4b417c7748b6f8387927bc6e45e40d", size = 2218832, upload-time = "2025-11-04T13:41:37.732Z" }, + { url = "https://files.pythonhosted.org/packages/e2/09/f53e0b05023d3e30357d82eb35835d0f6340ca344720a4599cd663dca599/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3d54f38609ff308209bd43acea66061494157703364ae40c951f83ba99a1a9", size = 2327585, upload-time = "2025-11-04T13:41:40Z" }, + { url = "https://files.pythonhosted.org/packages/aa/4e/2ae1aa85d6af35a39b236b1b1641de73f5a6ac4d5a7509f77b814885760c/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ff4321e56e879ee8d2a879501c8e469414d948f4aba74a2d4593184eb326660", size = 2041078, upload-time = "2025-11-04T13:41:42.323Z" }, + { url = "https://files.pythonhosted.org/packages/cd/13/2e215f17f0ef326fc72afe94776edb77525142c693767fc347ed6288728d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0d2568a8c11bf8225044aa94409e21da0cb09dcdafe9ecd10250b2baad531a9", size = 2173914, 
upload-time = "2025-11-04T13:41:45.221Z" }, + { url = "https://files.pythonhosted.org/packages/02/7a/f999a6dcbcd0e5660bc348a3991c8915ce6599f4f2c6ac22f01d7a10816c/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:a39455728aabd58ceabb03c90e12f71fd30fa69615760a075b9fec596456ccc3", size = 2129560, upload-time = "2025-11-04T13:41:47.474Z" }, + { url = "https://files.pythonhosted.org/packages/3a/b1/6c990ac65e3b4c079a4fb9f5b05f5b013afa0f4ed6780a3dd236d2cbdc64/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:239edca560d05757817c13dc17c50766136d21f7cd0fac50295499ae24f90fdf", size = 2329244, upload-time = "2025-11-04T13:41:49.992Z" }, + { url = "https://files.pythonhosted.org/packages/d9/02/3c562f3a51afd4d88fff8dffb1771b30cfdfd79befd9883ee094f5b6c0d8/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:2a5e06546e19f24c6a96a129142a75cee553cc018ffee48a460059b1185f4470", size = 2331955, upload-time = "2025-11-04T13:41:54.079Z" }, + { url = "https://files.pythonhosted.org/packages/5c/96/5fb7d8c3c17bc8c62fdb031c47d77a1af698f1d7a406b0f79aaa1338f9ad/pydantic_core-2.41.5-cp314-cp314t-win32.whl", hash = "sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa", size = 1988906, upload-time = "2025-11-04T13:41:56.606Z" }, + { url = "https://files.pythonhosted.org/packages/22/ed/182129d83032702912c2e2d8bbe33c036f342cc735737064668585dac28f/pydantic_core-2.41.5-cp314-cp314t-win_amd64.whl", hash = "sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c", size = 1981607, upload-time = "2025-11-04T13:41:58.889Z" }, + { url = "https://files.pythonhosted.org/packages/9f/ed/068e41660b832bb0b1aa5b58011dea2a3fe0ba7861ff38c4d4904c1c1a99/pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008", size = 1974769, upload-time = "2025-11-04T13:42:01.186Z" }, + { url = 
"https://files.pythonhosted.org/packages/11/72/90fda5ee3b97e51c494938a4a44c3a35a9c96c19bba12372fb9c634d6f57/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:b96d5f26b05d03cc60f11a7761a5ded1741da411e7fe0909e27a5e6a0cb7b034", size = 2115441, upload-time = "2025-11-04T13:42:39.557Z" }, + { url = "https://files.pythonhosted.org/packages/1f/53/8942f884fa33f50794f119012dc6a1a02ac43a56407adaac20463df8e98f/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:634e8609e89ceecea15e2d61bc9ac3718caaaa71963717bf3c8f38bfde64242c", size = 1930291, upload-time = "2025-11-04T13:42:42.169Z" }, + { url = "https://files.pythonhosted.org/packages/79/c8/ecb9ed9cd942bce09fc888ee960b52654fbdbede4ba6c2d6e0d3b1d8b49c/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:93e8740d7503eb008aa2df04d3b9735f845d43ae845e6dcd2be0b55a2da43cd2", size = 1948632, upload-time = "2025-11-04T13:42:44.564Z" }, + { url = "https://files.pythonhosted.org/packages/2e/1b/687711069de7efa6af934e74f601e2a4307365e8fdc404703afc453eab26/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f15489ba13d61f670dcc96772e733aad1a6f9c429cc27574c6cdaed82d0146ad", size = 2138905, upload-time = "2025-11-04T13:42:47.156Z" }, + { url = "https://files.pythonhosted.org/packages/09/32/59b0c7e63e277fa7911c2fc70ccfb45ce4b98991e7ef37110663437005af/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:7da7087d756b19037bc2c06edc6c170eeef3c3bafcb8f532ff17d64dc427adfd", size = 2110495, upload-time = "2025-11-04T13:42:49.689Z" }, + { url = "https://files.pythonhosted.org/packages/aa/81/05e400037eaf55ad400bcd318c05bb345b57e708887f07ddb2d20e3f0e98/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = 
"sha256:aabf5777b5c8ca26f7824cb4a120a740c9588ed58df9b2d196ce92fba42ff8dc", size = 1915388, upload-time = "2025-11-04T13:42:52.215Z" }, + { url = "https://files.pythonhosted.org/packages/6e/0d/e3549b2399f71d56476b77dbf3cf8937cec5cd70536bdc0e374a421d0599/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c007fe8a43d43b3969e8469004e9845944f1a80e6acd47c150856bb87f230c56", size = 1942879, upload-time = "2025-11-04T13:42:56.483Z" }, + { url = "https://files.pythonhosted.org/packages/f7/07/34573da085946b6a313d7c42f82f16e8920bfd730665de2d11c0c37a74b5/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76d0819de158cd855d1cbb8fcafdf6f5cf1eb8e470abe056d5d161106e38062b", size = 2139017, upload-time = "2025-11-04T13:42:59.471Z" }, + { url = "https://files.pythonhosted.org/packages/e6/b0/1a2aa41e3b5a4ba11420aba2d091b2d17959c8d1519ece3627c371951e73/pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b5819cd790dbf0c5eb9f82c73c16b39a65dd6dd4d1439dcdea7816ec9adddab8", size = 2103351, upload-time = "2025-11-04T13:43:02.058Z" }, + { url = "https://files.pythonhosted.org/packages/a4/ee/31b1f0020baaf6d091c87900ae05c6aeae101fa4e188e1613c80e4f1ea31/pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5a4e67afbc95fa5c34cf27d9089bca7fcab4e51e57278d710320a70b956d1b9a", size = 1925363, upload-time = "2025-11-04T13:43:05.159Z" }, + { url = "https://files.pythonhosted.org/packages/e1/89/ab8e86208467e467a80deaca4e434adac37b10a9d134cd2f99b28a01e483/pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ece5c59f0ce7d001e017643d8d24da587ea1f74f6993467d85ae8a5ef9d4f42b", size = 2135615, upload-time = "2025-11-04T13:43:08.116Z" }, + { url = 
"https://files.pythonhosted.org/packages/99/0a/99a53d06dd0348b2008f2f30884b34719c323f16c3be4e6cc1203b74a91d/pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:16f80f7abe3351f8ea6858914ddc8c77e02578544a0ebc15b4c2e1a0e813b0b2", size = 2175369, upload-time = "2025-11-04T13:43:12.49Z" }, + { url = "https://files.pythonhosted.org/packages/6d/94/30ca3b73c6d485b9bb0bc66e611cff4a7138ff9736b7e66bcf0852151636/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:33cb885e759a705b426baada1fe68cbb0a2e68e34c5d0d0289a364cf01709093", size = 2144218, upload-time = "2025-11-04T13:43:15.431Z" }, + { url = "https://files.pythonhosted.org/packages/87/57/31b4f8e12680b739a91f472b5671294236b82586889ef764b5fbc6669238/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:c8d8b4eb992936023be7dee581270af5c6e0697a8559895f527f5b7105ecd36a", size = 2329951, upload-time = "2025-11-04T13:43:18.062Z" }, + { url = "https://files.pythonhosted.org/packages/7d/73/3c2c8edef77b8f7310e6fb012dbc4b8551386ed575b9eb6fb2506e28a7eb/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:242a206cd0318f95cd21bdacff3fcc3aab23e79bba5cac3db5a841c9ef9c6963", size = 2318428, upload-time = "2025-11-04T13:43:20.679Z" }, + { url = "https://files.pythonhosted.org/packages/2f/02/8559b1f26ee0d502c74f9cca5c0d2fd97e967e083e006bbbb4e97f3a043a/pydantic_core-2.41.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:d3a978c4f57a597908b7e697229d996d77a6d3c94901e9edee593adada95ce1a", size = 2147009, upload-time = "2025-11-04T13:43:23.286Z" }, + { url = "https://files.pythonhosted.org/packages/5f/9b/1b3f0e9f9305839d7e84912f9e8bfbd191ed1b1ef48083609f0dabde978c/pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b2379fa7ed44ddecb5bfe4e48577d752db9fc10be00a6b7446e9663ba143de26", size = 2101980, upload-time = "2025-11-04T13:43:25.97Z" }, + { url = 
"https://files.pythonhosted.org/packages/a4/ed/d71fefcb4263df0da6a85b5d8a7508360f2f2e9b3bf5814be9c8bccdccc1/pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:266fb4cbf5e3cbd0b53669a6d1b039c45e3ce651fd5442eff4d07c2cc8d66808", size = 1923865, upload-time = "2025-11-04T13:43:28.763Z" }, + { url = "https://files.pythonhosted.org/packages/ce/3a/626b38db460d675f873e4444b4bb030453bbe7b4ba55df821d026a0493c4/pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58133647260ea01e4d0500089a8c4f07bd7aa6ce109682b1426394988d8aaacc", size = 2134256, upload-time = "2025-11-04T13:43:31.71Z" }, + { url = "https://files.pythonhosted.org/packages/83/d9/8412d7f06f616bbc053d30cb4e5f76786af3221462ad5eee1f202021eb4e/pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:287dad91cfb551c363dc62899a80e9e14da1f0e2b6ebde82c806612ca2a13ef1", size = 2174762, upload-time = "2025-11-04T13:43:34.744Z" }, + { url = "https://files.pythonhosted.org/packages/55/4c/162d906b8e3ba3a99354e20faa1b49a85206c47de97a639510a0e673f5da/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:03b77d184b9eb40240ae9fd676ca364ce1085f203e1b1256f8ab9984dca80a84", size = 2143141, upload-time = "2025-11-04T13:43:37.701Z" }, + { url = "https://files.pythonhosted.org/packages/1f/f2/f11dd73284122713f5f89fc940f370d035fa8e1e078d446b3313955157fe/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:a668ce24de96165bb239160b3d854943128f4334822900534f2fe947930e5770", size = 2330317, upload-time = "2025-11-04T13:43:40.406Z" }, + { url = "https://files.pythonhosted.org/packages/88/9d/b06ca6acfe4abb296110fb1273a4d848a0bfb2ff65f3ee92127b3244e16b/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f14f8f046c14563f8eb3f45f499cc658ab8d10072961e07225e507adb700e93f", size = 2316992, upload-time = "2025-11-04T13:43:43.602Z" }, + { url = 
"https://files.pythonhosted.org/packages/36/c7/cfc8e811f061c841d7990b0201912c3556bfeb99cdcb7ed24adc8d6f8704/pydantic_core-2.41.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:56121965f7a4dc965bff783d70b907ddf3d57f6eba29b6d2e5dabfaf07799c51", size = 2145302, upload-time = "2025-11-04T13:43:46.64Z" }, +] + +[[package]] +name = "pydantic-evals" +version = "1.73.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "logfire-api" }, + { name = "pydantic" }, + { name = "pydantic-ai-slim" }, + { name = "pyyaml" }, + { name = "rich" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/02/45/ce1f9b97c4838f940c98693bc1d6298f0e1396355998942b095ce17157fe/pydantic_evals-1.73.0.tar.gz", hash = "sha256:c1f38ad9c4f566bee6958c92f205b8200957b4baf3dd5239e2a4a06edd28e3dc", size = 56137, upload-time = "2026-03-27T03:49:50.861Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/01/4e/aefc34a68adc165ddec22c0632cb3076579c46751ac11acdf8cec6462891/pydantic_evals-1.73.0-py3-none-any.whl", hash = "sha256:0609210d4825cc8339b5cb649be38321450b46d6e87d72c1ffde73598741fd5a", size = 67143, upload-time = "2026-03-27T03:49:44.298Z" }, +] + +[[package]] +name = "pydantic-graph" +version = "1.73.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "httpx" }, + { name = "logfire-api" }, + { name = "pydantic" }, + { name = "typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1a/22/d479ea32e3c712c6711e41157fb975d81582e5171510e4c662f21a85e9fe/pydantic_graph-1.73.0.tar.gz", hash = "sha256:f0d3e4984af1d902cdda1ccd3fcd86949d45d3ed21559e781f7cf9eace2ed914", size = 58717, upload-time = "2026-03-27T03:49:51.967Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/08/b3/4cc0b1c543b8a0c1f9add7bdeb2e8cd583961a795664a1a74d1fc8200416/pydantic_graph-1.73.0-py3-none-any.whl", hash = "sha256:aaab8b1580885f5108401db0a7da58d6c7643e467eb626b8a1364b1030327de0", size = 
72504, upload-time = "2026-03-27T03:49:45.668Z" }, +] + [[package]] name = "pygments" version = "2.19.2" @@ -1116,6 +1574,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" }, ] +[[package]] +name = "typing-inspection" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/55/e3/70399cb7dd41c10ac53367ae42139cf4b1ca5f36bb3dc6c9d33acdb43655/typing_inspection-0.4.2.tar.gz", hash = "sha256:ba561c48a67c5958007083d386c3295464928b01faa735ab8547c5692e87f464", size = 75949, upload-time = "2025-10-01T02:14:41.687Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/dc/9b/47798a6c91d8bdb567fe2698fe81e0c6b7cb7ef4d13da4114b41d239f65d/typing_inspection-0.4.2-py3-none-any.whl", hash = "sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7", size = 14611, upload-time = "2025-10-01T02:14:40.154Z" }, +] + [[package]] name = "urllib3" version = "2.5.0" @@ -1230,3 +1700,12 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/68/a1/dcb68430b1d00b698ae7a7e0194433bce4f07ded185f0ee5fb21e2a2e91e/websockets-15.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:cad21560da69f4ce7658ca2cb83138fb4cf695a2ba3e475e0559e05991aa8122", size = 176884, upload-time = "2025-03-05T20:03:27.934Z" }, { url = "https://files.pythonhosted.org/packages/fa/a8/5b41e0da817d64113292ab1f8247140aac61cbf6cfd085d6a0fa77f4984f/websockets-15.0.1-py3-none-any.whl", hash = "sha256:f7a866fbc1e97b5c617ee4116daaa09b722101d4a3c170c787450ba409f9736f", size = 169743, upload-time = "2025-03-05T20:03:39.41Z" }, ] + +[[package]] +name = "zipp" +version = "3.23.0" +source = { registry = 
"https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e3/02/0f2892c661036d50ede074e376733dca2ae7c6eb617489437771209d4180/zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166", size = 25547, upload-time = "2025-06-08T17:06:39.4Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2e/54/647ade08bf0db230bfea292f893923872fd20be6ac6f53b2b936ba839d75/zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e", size = 10276, upload-time = "2025-06-08T17:06:38.034Z" }, +]