diff --git a/README.md b/README.md index 6b9f641..8b1a583 100644 --- a/README.md +++ b/README.md @@ -4,523 +4,549 @@ [![Python 3.10+](https://img.shields.io/badge/python-3.10+-blue.svg)](https://www.python.org/downloads/) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) -**Production-ready caching library** for Python with TTL, stale-while-revalidate (SWR), and background refresh. -Type-safe, fast, thread-safe, async-friendly, and framework-agnostic. +A production-ready Python caching library built around two symbols: `cache` and `bg`. -> Issues & feature requests: [new issue](https://github.com/agkloop/advanced_caching/issues/new) +It supports **TTL**, **Stale-While-Revalidate**, and **Background Refresh** — all in a single decorator that works transparently with both `def` and `async def`. Backends are pluggable (InMemory, Redis, S3, GCS, LocalFile, ChainCache), serialization is swappable (orjson, msgpack, pickle, protobuf, or custom), and metrics can be exported to Prometheus, OpenTelemetry, or GCP Cloud Monitoring. The hot path is lock-free and hits **~6–10 M ops/s** with zero external dependencies on the default config. + +``` +pip install advanced-caching +``` --- -## Table of Contents -- [Installation](#installation) -- [Quick Start](#quick-start) -- [Metrics & Monitoring](#metrics--monitoring) -- [Key Templates](#key-templates) -- [Storage Backends](#storage-backends) - - [InMemCache](#inmemcache) - - [RedisCache & Serializers](#rediscache--serializers) - - [HybridCache (L1 + L2)](#hybridcache-l1--l2) - - [ChainCache (multi-level)](#chaincache-multi-level) - - [Custom Storage](#custom-storage) -- [API Reference](#api-reference) -- [Testing & Benchmarks](#testing--benchmarks) -- [Use Cases](#use-cases) -- [Comparison](#comparison) -- [Contributing](#contributing) -- [License](#license) -- [BGCache (Background)](#bgcache-background) - - [Production example](docs/bgcache.md) +## Contents + +1. 
[Install](#install) +2. [The Two Symbols](#the-two-symbols) +3. [@cache — TTL & SWR](#cache--ttl--stale-while-revalidate) +4. [@bg — Background Refresh](#bg--background-refresh) +5. [bg.write / bg.read — Multi-Process](#bgwrite--bgread--multi-process) +6. [Storage Backends](#storage-backends) +7. [Serializers](#serializers) +8. [Metrics](#metrics) +9. [Performance](#performance) +10. [Testing](#testing) --- -## Installation +## Install ```bash -uv pip install advanced-caching # core (includes InMemoryMetrics) -uv pip install "advanced-caching[redis]" # Redis support -uv pip install "advanced-caching[opentelemetry]" # OpenTelemetry metrics -uv pip install "advanced-caching[gcp-monitoring]" # GCP Cloud Monitoring -uv pip install "advanced-caching[all-metrics]" # All metrics exporters -# pip works too -```` +pip install advanced-caching # core — InMemCache, orjson +pip install "advanced-caching[redis]" # RedisCache +pip install "advanced-caching[msgpack]" # msgpack serializer +pip install "advanced-caching[s3]" # S3Cache +pip install "advanced-caching[gcs]" # GCSCache +``` --- -## Quick Start +## The Two Symbols ```python -from advanced_caching import TTLCache, SWRCache, BGCache +from advanced_caching import cache, bg +``` + +Everything the library does is exposed through these two names: -# Sync function -@TTLCache.cached("user:{}", ttl=300) -def get_user(user_id: int) -> dict: - return db.fetch(user_id) +| Symbol | Pattern | Works with | +|--------|---------|-----------| +| `@cache(ttl, key=…)` | TTL — expire after N seconds | `def` and `async def` | +| `@cache(ttl, stale=N, key=…)` | Stale-While-Revalidate | `def` and `async def` | +| `@bg(interval, key=…)` | Background refresh on a schedule | `def` and `async def` | +| `@bg.write(interval, key=…)` | Write half of multi-process split | `def` and `async def` | +| `bg.read(key, interval=…)` | Read half — local mirror, never blocks | returns a callable | -# Async function (works natively) -@TTLCache.cached("user:{}", 
ttl=300) -async def get_user_async(user_id: int) -> dict: - return await db.fetch(user_id) +--- + +## `@cache` — TTL & Stale-While-Revalidate + +### Signature + +```python +cache( + ttl: int | float, + *, + key: str | Callable, # "user:{user_id}", "item:{}", or a callable + stale: int | float = 0, # > 0 enables Stale-While-Revalidate + store: ... = None, # None → fresh InMemCache() per function + metrics: ... = None, +) +``` -# Stale-While-Revalidate (Sync) -@SWRCache.cached("product:{}", ttl=60, stale_ttl=30) -def get_product(product_id: int) -> dict: - return api.fetch_product(product_id) +### TTL cache -# Stale-While-Revalidate (Async) -@SWRCache.cached("async:product:{}", ttl=60, stale_ttl=30) -async def get_product_async(product_id: int) -> dict: - return await api.fetch_product(product_id) +Cache the result for `ttl` seconds. Works with sync and async functions identically. -# Background refresh (Sync) -@BGCache.register_loader("inventory", interval_seconds=300) -def load_inventory() -> list[dict]: - return warehouse_api.get_all_items() +```python +from advanced_caching import cache -# Background refresh (Async) -@BGCache.register_loader("inventory_async", interval_seconds=300) -async def load_inventory_async() -> list[dict]: - return await warehouse_api.get_all_items() +@cache(60, key="user:{user_id}") +async def get_user(user_id: int) -> dict: + return await db.fetchrow("SELECT * FROM users WHERE id=$1", user_id) -# Configured Cache (Reusable Backend) -# Create a decorator pre-wired with a specific cache (e.g., Redis) -RedisTTL = TTLCache.configure(cache=RedisCache(redis_client)) +@cache(300, key="config:{env}") +def load_config(env: str) -> dict: + return read_yaml(f"config/{env}.yaml") -@RedisTTL.cached("user:{}", ttl=300) -async def get_user_redis(user_id: int): - return await db.fetch(user_id) +user = await get_user(42) # miss → calls DB +user = await get_user(42) # hit → instant, no DB ``` ---- +### Stale-While-Revalidate (SWR) -## Metrics & Monitoring 
+Set `stale > 0` to add a second window after the TTL expires. During this window the stale value is returned immediately while a background refresh runs — eliminating the latency spike that happens on a hard expiry. -**Optional, high-performance metrics** with <1% overhead for production monitoring. +``` +t=0 ──────────── t=ttl ─────────── t=ttl+stale ──── dead + [ fresh: hit ] [ stale: instant + bg refresh ] [ miss ] +``` ```python -from advanced_caching import TTLCache -from advanced_caching.metrics import InMemoryMetrics +@cache(60, stale=30, key="price:{symbol}") +async def get_price(symbol: str) -> float: + return await exchange_api.fetch(symbol) -# Create metrics collector (no external dependencies!) -metrics = InMemoryMetrics() +# t < 60s → fresh hit, no network call +# 60s–90s → returns last known price immediately, triggers bg refresh +# t > 90s → entry dead, blocks caller until refresh completes +``` -# Use with any decorator -@TTLCache.cached("user:{id}", ttl=60, metrics=metrics) -def get_user(id: int): - return {"id": id, "name": "Alice"} +### Key templates -# Query metrics via API -stats = metrics.get_stats() -# Returns: hit_rate, latency percentiles (p50/p95/p99), -# errors, memory usage, background refresh stats -``` +```python +# Static — fastest (~16M ops/s key resolution) +@cache(60, key="feature_flags") +async def load_flags() -> dict: ... -**Built-in collectors:** -- **InMemoryMetrics**: Zero dependencies, perfect for API queries -- **NullMetrics**: Zero overhead when metrics disabled (default) +# Positional {} — maps to the first argument +@cache(60, key="user:{}") +async def get_user(user_id: int) -> dict: ... -**Exporters (optional):** -- **OpenTelemetry**: OTLP, Jaeger, Zipkin, Prometheus -- **GCP Cloud Monitoring**: Google Cloud Platform +# Named — resolved by parameter name +@cache(60, key="order:{user_id}:{order_id}") +async def get_order(user_id: int, order_id: int) -> dict: ... 
-**Custom exporters:** See [Custom Exporters Guide](docs/custom-metrics-exporters.md) for Prometheus, StatsD, and Datadog implementations. +# Callable — full control +@cache(60, key=lambda uid, role: f"user:{role}:{uid}") +async def get_user_by_role(uid: int, role: str) -> dict: ... +``` -📖 **[Full Metrics Documentation](docs/metrics.md)** +### Invalidation ---- +```python +# Delete a specific entry (same signature as the decorated function) +await get_user.invalidate(42) # removes "user:42" +load_config.invalidate("prod") # removes "config:prod" -## Key Templates +# Wipe everything in the store +get_user.clear() +``` + +### Custom store -The library supports smart key generation that handles both positional and keyword arguments seamlessly. +```python +import redis +from advanced_caching import cache, RedisCache, ChainCache, InMemCache -* **Positional Placeholder**: `"user:{}"` - * Uses the first argument, whether passed positionally or as a keyword. - * Example: `get_user(123)` or `get_user(user_id=123)` -> `"user:123"` +r = redis.from_url("redis://localhost:6379", decode_responses=False) +redis_store = RedisCache(r, prefix="myapp:") -* **Named Placeholder**: `"user:{user_id}"` - * Resolves `user_id` from keyword arguments OR positional arguments (by inspecting the function signature). - * Example: `def get_user(user_id): ...` called as `get_user(123)` -> `"user:123"` +# Single Redis store +@cache(3600, key="catalog:{page}", store=redis_store) +async def get_catalog(page: int) -> list: ... -* **Custom Function**: - * For complex logic, pass a callable. - * Example1 for kw/args with default values use : `key=lambda *a, **k: f"user:{k.get('user_id', a[0])}"` - * Example2 fns with no defaults use : `key=lambda user_id: f"user:{user_id}"` +# Two-tier: L1 InMem (60s) + L2 Redis (1h) +tiered = ChainCache.build(InMemCache(), redis_store, ttls=[60, 3600]) + +@cache(3600, key="catalog:{page}", store=tiered) +async def get_catalog_tiered(page: int) -> list: ... 
+``` --- -## Storage Backends +## `@bg` — Background Refresh -- InMemCache (default): Fast, process-local -- RedisCache: Distributed in-memory -- HybridCache: L1 (memory) + L2 (Redis) -- ChainCache: Arbitrary multi-level chain (e.g., InMem -> Redis -> S3/GCS) -- S3Cache: Object storage backend (AWS) -- GCSCache: Object storage backend (Google Cloud) -- LocalFileCache: Filesystem-backed cache (per-host) +`@bg` runs the function on a fixed schedule (APScheduler) and stores the result. Every call is a cache read — the function never blocks the caller. Latency is always sub-microsecond. -### InMemCache +### Signature -Thread-safe in-memory cache with TTL. +```python +bg( + interval: int | float, # seconds between refreshes + *, + key: str, # no template placeholders — bg is zero-argument + ttl: int | float | None = None, # default: interval * 2 + store: ... = None, + metrics: ... = None, + on_error: Callable[[Exception], None] | None = None, + run_immediately: bool = True, # populate cache before first request +) +``` + +### Usage ```python -from advanced_caching import InMemCache +from advanced_caching import bg -cache = InMemCache() -cache.set("key", "value", ttl=60) -cache.get("key") -cache.delete("key") -cache.exists("key") -cache.set_if_not_exists("key", "value", ttl=60) -cache.cleanup_expired() -``` +# Async function — uses asyncio scheduler +@bg(300, key="feature_flags") +async def load_flags() -> dict: + return await remote_config.fetch() ---- +# Sync function — uses background thread scheduler +@bg(60, key="db_stats") +def collect_stats() -> dict: + return db.execute("SELECT count(*) FROM users").fetchone() + +# Call exactly like a normal function — always instant +flags = await load_flags() +stats = collect_stats() +``` -### RedisCache & Serializers +### Error handling ```python -import redis -from advanced_caching import RedisCache, JsonSerializer +import logging -client = redis.Redis(host="localhost", port=6379) +@bg(60, key="rates", on_error=lambda e: 
logging.warning("refresh failed: %s", e)) +async def refresh_rates() -> dict: + return await forex_api.fetch() +# On error: stale value is kept, on_error is called, scheduler keeps running +``` + +### Shutdown -cache = RedisCache(client, prefix="app:") -json_cache = RedisCache(client, prefix="app:json:", serializer="json") -custom_json = RedisCache(client, prefix="app:json2:", serializer=JsonSerializer()) +```python +import atexit +atexit.register(bg.shutdown) + +# FastAPI lifespan: +from contextlib import asynccontextmanager +@asynccontextmanager +async def lifespan(app): + yield + bg.shutdown() ``` --- -### HybridCache (L1 + L2) +## `bg.write` / `bg.read` — Multi-Process + +For multi-process deployments (e.g. gunicorn workers), one process writes to a shared store (Redis) and every reader process keeps a private in-memory copy synced on a schedule. Reader calls are always local — they never touch Redis in the request path. + +```mermaid +flowchart LR + subgraph Worker + W["@bg.write(60, key='rates', store=redis)"] -->|every 60s| FN[refresh fn] + FN --> RD[(Redis)] + end + subgraph "Web Process × N" + BR["bg.read('rates', interval=30, store=redis)"] -->|every 30s| RD + BR --> L[(Local\nInMemCache)] + L -->|sub-μs| REQ[Request handler] + end +``` -Two-level cache: +### `bg.write` -* **L1**: In-memory -* **L2**: Redis +```python +bg.write( + interval: int | float, + *, + key: str, + ttl: int | float | None = None, + store: CacheStorage | None = None, # shared backend, e.g. RedisCache + metrics: MetricsCollector | None = None, + on_error: Callable | None = None, + run_immediately: bool = True, +) +``` -#### Simple setup +- **One writer per key per process** — raises `ValueError` on duplicate registration. +- Tracks `background_refresh` success/failure in `metrics=`. 
```python import redis -from advanced_caching import HybridCache, TTLCache +from advanced_caching import bg, RedisCache, InMemoryMetrics -client = redis.Redis() -hybrid = HybridCache.from_redis(client, prefix="app:", l1_ttl=60) +r = redis.from_url(REDIS_URL, decode_responses=False) +shared = RedisCache(r, prefix="shared:") +metrics = InMemoryMetrics() -@TTLCache.cached("user:{}", ttl=300, cache=hybrid) -def get_user(user_id: int): - return {"id": user_id} +@bg.write(60, key="exchange_rates", store=shared, metrics=metrics) +async def refresh_rates() -> dict: + return await forex_api.fetch_all() ``` -#### Manual wiring +### `bg.read` ```python -from advanced_caching import HybridCache, InMemCache, RedisCache - -l1 = InMemCache() -l2 = RedisCache(client, prefix="app:") -# l2_ttl defaults to l1_ttl * 2 if not specified -hybrid = HybridCache(l1_cache=l1, l2_cache=l2, l1_ttl=60) - -# Explicit l2_ttl for longer L2 persistence -hybrid_long_l2 = HybridCache(l1_cache=l1, l2_cache=l2, l1_ttl=60, l2_ttl=3600) +bg.read( + key: str, + *, + interval: int | float = 0, + ttl: int | float | None = None, + store: CacheStorage | None = None, # None → auto-discover writer's store (same process) + metrics: MetricsCollector | None = None, + on_error: Callable | None = None, + run_immediately: bool = True, +) -> Callable[[], Any] ``` -**TTL behavior:** -- `l1_ttl`: How long data stays in fast L1 memory cache -- `l2_ttl`: How long data persists in L2 (Redis). Defaults to `l1_ttl * 2` -- When data expires from L1 but exists in L2, it's automatically repopulated to L1 - -#### With BGCache using lambda factory - -For lazy initialization (e.g., deferred Redis connection): +- Returns a **callable** — call it to get the current value from the local mirror. +- Each `bg.read()` call creates its own **independent** private local cache. +- `store=None` within the same process → auto-discovers the writer's store. 
```python -from advanced_caching import BGCache, HybridCache, InMemCache, RedisCache - -def get_redis_cache(): - """Lazy Redis connection factory.""" - import redis - client = redis.Redis(host="localhost", port=6379) - return RedisCache(client, prefix="app:") - -@BGCache.register_loader( - "config_map", - interval_seconds=3600, - run_immediately=True, - cache=lambda: HybridCache( - l1_cache=InMemCache(), - l2_cache=get_redis_cache(), - l1_ttl=3600, - l2_ttl=86400 # L2 persists longer than L1 - ) -) -def load_config_map() -> dict[str, dict]: - return {"db": {"host": "localhost"}, "cache": {"ttl": 300}} +# Different process from writer — must pass store explicitly: +get_rates = bg.read("exchange_rates", interval=30, store=shared) +rates = get_rates() # local dict lookup, never blocks on Redis -# Access nested data -db_host = load_config_map().get("db", {}).get("host") +# Same process as writer — store auto-discovered: +get_rates = bg.read("exchange_rates") ``` --- -### ChainCache (multi-level) +## Storage Backends -```python -from advanced_caching import InMemCache, RedisCache, S3Cache, ChainCache +| Backend | Best for | Install | +|---------|---------|---------| +| `InMemCache` | Single-process apps, highest throughput | built-in | +| `RedisCache` | Distributed / multi-process | `[redis]` | +| `ChainCache` | N-level read-through (L1 + L2 + …) | built-in | +| `HybridCache` | L1 in-memory + L2 Redis, convenience wrapper | `[redis]` | +| `LocalFileCache` | Per-host disk persistence | built-in | +| `S3Cache` | Large objects, cheap durable storage | `[s3]` | +| `GCSCache` | Large objects on Google Cloud | `[gcs]` | -chain = ChainCache([ - (InMemCache(), 60), # L1 fast - (RedisCache(redis_client), 300), # L2 distributed - (S3Cache(bucket="my-cache"), 3600), # L3 durable -]) +### `InMemCache` -# Write-through all levels (per-level TTL caps applied) -chain.set("user:123", {"name": "Ana"}, ttl=900) +Thread-safe. Lock-free hot path (GIL guarantees `dict.get` atomicity). 
-# Read-through with promotion to faster levels -user = chain.get("user:123") +```python +from advanced_caching import InMemCache +store = InMemCache() ``` -Notes: -- Provide per-level TTL caps in the tuple; if `None`, the passed `ttl` is used. -- `set_if_not_exists` delegates atomicity to the deepest level and backfills upper levels on success. -- `get`/`get_entry` promote hits upward for hotter reads. +### `RedisCache` ---- +```python +import redis +from advanced_caching import RedisCache, serializers -### Object Storage Backends (S3/GCS) +r = redis.from_url("redis://localhost:6379", decode_responses=False) -Store large cached objects cheaply in AWS S3 or Google Cloud Storage. -Supports compression and metadata-based TTL checks to minimize costs. +store = RedisCache(r, prefix="app:", serializer=serializers.msgpack) +``` -**[📚 Full Documentation & Best Practices](docs/object-storage-caching.md)** +Connection pooling: ```python -from advanced_caching import S3Cache, GCSCache - -user_cache = S3Cache( - bucket="my-cache-bucket", - prefix="users/", - serializer="json", - compress=True, - dedupe_writes=True, # optional: skip uploads when content unchanged (adds HEAD) -) +pool = redis.ConnectionPool.from_url("redis://localhost", max_connections=20) +r = redis.Redis(connection_pool=pool, decode_responses=False) +``` + +### `ChainCache` — multi-level read-through -gcs_cache = GCSCache( - bucket="my-cache-bucket", - prefix="users/", - serializer="json", - compress=True, - dedupe_writes=True, # optional: skip uploads when content unchanged (adds metadata check) +On a miss at L1, reads from L2 and backfills L1. On a hit at L1, never touches L2. 
+ +```python +from advanced_caching import ChainCache, InMemCache, RedisCache + +tiered = ChainCache.build( + InMemCache(), + RedisCache(r, prefix="v1:"), + ttls=[60, 3600], # L1 TTL=60s, L2 TTL=1h ) + +# Three tiers: +three_tier = ChainCache.build(l1, l2, l3, ttls=[60, 3600, 86400]) ``` -### RedisCache dedupe_writes +### `LocalFileCache` -`RedisCache(..., dedupe_writes=True)` compares the serialized payload to the stored value; if unchanged, it skips rewriting and only refreshes TTL when provided. +```python +from advanced_caching import LocalFileCache, serializers +store = LocalFileCache("/var/cache/myapp", serializer=serializers.json) +``` -### LocalFileCache (filesystem) +### `S3Cache` / `GCSCache` ```python -from advanced_caching import LocalFileCache +from advanced_caching import S3Cache, GCSCache, serializers -cache = LocalFileCache("/var/tmp/ac-cache", dedupe_writes=True) -cache.set("user:123", {"name": "Ana"}, ttl=300) -user = cache.get("user:123") +s3 = S3Cache(bucket="myapp-cache", prefix="v1/", serializer=serializers.msgpack) +gcs = GCSCache(bucket="myapp-cache", prefix="v1/", serializer=serializers.json) ``` -Notes: one file per key; atomic writes; optional compression and dedupe to skip rewriting identical content. - --- -### Custom Storage +## Serializers + +Serializers are only relevant for backends that write bytes externally: `RedisCache`, `LocalFileCache`, `S3Cache`, `GCSCache`. `InMemCache` stores Python objects directly — no serialization overhead. 
-Implement your own storage backend by following the `CacheStorage` protocol: +| Serializer | Symbol | Best for | +|-----------|--------|---------| +| orjson (default) | `serializers.json` | JSON-safe dicts / lists | +| pickle | `serializers.pickle` | Any Python object, no schema | +| msgpack | `serializers.msgpack` | Compact binary, large payloads | +| protobuf | `serializers.protobuf(MyClass)` | Cross-language, enforced schema | +| custom | any object with `.dumps`/`.loads` | Anything | ```python -from advanced_caching import CacheStorage, CacheEntry -from typing import Any - -class MyCustomStorage: - """Custom cache storage implementation.""" - - def get(self, key: str) -> Any | None: - """Retrieve value by key, or None if not found/expired.""" - ... - - def get_entry(self, key: str) -> CacheEntry | None: - """Retrieve full cache entry with metadata.""" - ... - - def set(self, key: str, value: Any, ttl: int | None = None) -> None: - """Store value with optional TTL in seconds.""" - ... - - def set_if_not_exists(self, key: str, value: Any, ttl: int | None = None) -> bool: - """Atomic set-if-not-exists. Returns True if set, False if key exists.""" - ... - - def delete(self, key: str) -> None: - """Remove key from storage.""" - ... - - def exists(self, key: str) -> bool: - """Check if key exists and is not expired.""" - ... 
- -# Validate implementation -from advanced_caching import validate_cache_storage -validate_cache_storage(MyCustomStorage()) - -# Use with decorators -@TTLCache.cached("user:{id}", ttl=60, cache=MyCustomStorage()) -def get_user(id: int): - return {"id": id} -``` - -**Exposing Metrics:** - -To track cache operations in your custom storage, wrap it with `InstrumentedStorage`: +from advanced_caching import serializers, RedisCache + +RedisCache(r, serializer=serializers.json) +RedisCache(r, serializer=serializers.pickle) +RedisCache(r, serializer=serializers.msgpack) +RedisCache(r, serializer=serializers.protobuf(MyProto)) + +# Custom: +class MySerializer: + def dumps(self, v: object) -> bytes: ... + def loads(self, b: bytes) -> object: ... + +RedisCache(r, serializer=MySerializer()) +``` + +--- + +## Metrics + +### `InMemoryMetrics` — built-in collector ```python -from advanced_caching.storage import InstrumentedStorage -from advanced_caching.metrics import InMemoryMetrics +from advanced_caching import InMemoryMetrics -# Create metrics collector metrics = InMemoryMetrics() -# Wrap your custom storage -instrumented = InstrumentedStorage( - storage=MyCustomStorage(), - metrics=metrics, - cache_name="my_custom_cache" -) +@cache(60, key="user:{uid}", metrics=metrics) +async def get_user(uid: int) -> dict: ... -# Use instrumented storage -@TTLCache.cached("user:{id}", ttl=60, cache=instrumented) -def get_user(id: int): - return {"id": id} +@bg(300, key="flags", metrics=metrics) +async def load_flags() -> dict: ... 
-# Query metrics stats = metrics.get_stats() -# Includes: hits, misses, latency, errors, memory usage for "my_custom_cache" +# { +# "caches": { +# "get_user": { +# "hits": 120, "misses": 5, "hit_rate_percent": 96.0, +# "latency_p50_ms": 0.08, "latency_p95_ms": 0.31, +# "latency_p99_ms": 0.85, "errors": 0 +# } +# }, +# "background_refresh": { +# "flags": {"success": 12, "failure": 0} +# } +# } ``` -`InstrumentedStorage` automatically tracks: -- All cache operations (get, set, delete) -- Operation latency (p50/p95/p99 percentiles) -- Errors with exception types -- Memory usage (if your storage supports it) +### Exporters -See [Metrics Documentation](docs/metrics.md) for details. +```python +# Prometheus (pip install prometheus_client) +from advanced_caching.exporters import PrometheusMetrics +metrics = PrometheusMetrics(namespace="myapp", subsystem="cache") ---- +# OpenTelemetry (pip install opentelemetry-api) +from advanced_caching.exporters import OpenTelemetryMetrics +metrics = OpenTelemetryMetrics(meter_name="myapp.cache") -## BGCache (Background) +# GCP Cloud Monitoring (pip install google-cloud-monitoring) +from advanced_caching.exporters import GCPCloudMonitoringMetrics +metrics = GCPCloudMonitoringMetrics(project_id="my-project") +``` -Single-writer/multi-reader pattern with background refresh and optional independent reader caches. 
+### Custom collector ```python -from advanced_caching import BGCache, InMemCache - -# Writer: enforced single registration per key; refreshes cache on a schedule -@BGCache.register_writer( - "daily_config", - interval_seconds=300, # refresh every 5 minutes - ttl=None, # defaults to interval*2 - run_immediately=True, - cache=InMemCache(), # or RedisCache / ChainCache -) -def load_config(): - return expensive_fetch() - -# Readers: read-only; keep a local cache warm by pulling from the writer's cache -reader = BGCache.get_reader( - "daily_config", - interval_seconds=60, # periodically pull from source cache into local cache - ttl=None, # local cache TTL defaults to interval*2 - run_immediately=True, - cache=InMemCache(), # local cache for this process -) - -# Usage -cfg = reader() # returns value from local cache; on miss pulls once from source cache +class MyMetrics: + def record_hit(self, cache_name, key=None, metadata=None): ... + def record_miss(self, cache_name, key=None, metadata=None): ... + def record_set(self, cache_name, key=None, value_size=None, metadata=None): ... + def record_delete(self, cache_name, key=None, metadata=None): ... + def record_latency(self, cache_name, operation=None, duration_seconds=None, metadata=None): ... + def record_error(self, cache_name, operation=None, error_type=None, metadata=None): ... + def record_memory_usage(self, cache_name, bytes_used=None, entry_count=None, metadata=None): ... + def record_background_refresh(self, cache_name, success=None, duration_seconds=None, metadata=None): ... ``` -Notes: -- `register_writer` enforces one writer per key globally; raises if duplicate. -- `interval_seconds` <= 0 disables scheduling; wrapper still writes-on-demand on misses. -- `run_immediately=True` triggers an initial refresh if the cache is empty. -- `get_reader` creates a read-only accessor backed by its own cache; it pulls from the provided cache (usually the writer’s cache) and optionally keeps it warm on a schedule. 
-- Use `cache=` on readers to override the local cache backend (e.g., InMemCache in each process) while sourcing data from the writer’s cache backend. +### `NULL_METRICS` — zero-overhead no-op + +```python +from advanced_caching.metrics import NULL_METRICS -See `docs/bgcache.md` for a production-grade example with Redis/ChainCache, error handling, and reader-local caches. +@cache(60, key="fast:{x}", metrics=NULL_METRICS) +def fast_fn(x: int) -> int: ... +``` --- -## API Reference +## Performance -* `TTLCache.cached(key, ttl, cache=None)` -* `SWRCache.cached(key, ttl, stale_ttl=0, cache=None)` -* `BGCache.register_loader(key, interval_seconds, ttl=None, run_immediately=True)` -* Storages: +Measured on Python 3.12, Apple M2, single thread, N=200,000 iterations. - * `InMemCache()` - * `RedisCache(redis_client, prefix="", serializer="pickle"|"json"|custom)` - * `HybridCache(l1_cache, l2_cache, l1_ttl=60, l2_ttl=None)` - `l2_ttl` defaults to `l1_ttl * 2` -* Utilities: +**Storage & decorator hot paths** - * `CacheEntry` - * `CacheStorage` - * `validate_cache_storage()` +| Operation | Throughput | Latency | +|-----------|-----------|---------| +| `InMemCache.get()` raw | **10.3 M ops/s** | 0.10 µs | +| `@cache` sync miss (ttl=0) | **7.3 M ops/s** | 0.14 µs | +| `bg.read()` local hit | **7.5 M ops/s** | 0.13 µs | +| `@cache` sync hit — static key | **6.0 M ops/s** | 0.17 µs | +| `@cache` async hit — static key | **4.9 M ops/s** | 0.20 µs | +| `@cache` SWR stale-serve | **2.9 M ops/s** | 0.35 µs | +| `@cache` ChainCache L1 hit | **2.9 M ops/s** | 0.35 µs | +| `@cache` sync hit — named template key | **1.7 M ops/s** | 0.59 µs | +| `@cache` sync hit + InMemoryMetrics | **1.6 M ops/s** | 0.63 µs | ---- +**Callable key strategies** + +| Key type | Throughput | Latency | Notes | +|----------|-----------|---------|-------| +| `key=lambda uid: f"u:{uid}"` | **3.9 M ops/s** | 0.26 µs | Fastest callable — no inspection | +| `key=lambda t, uid: f"{t}:{uid}"` (async) | **2.7 M ops/s** 
| 0.37 µs | Multi-arg async | +| `key=lambda uid: f"...{md5(uid)}"` | **1.4 M ops/s** | 0.73 µs | Hashing overhead | +| `key="user:{user_id}"` template | **1.7 M ops/s** | 0.59 µs | Signature-bound template | -## Testing & Benchmarks +**Key insights:** +- **Static key** (`"feature_flags"`) is the fastest — no key computation at all (~6 M ops/s) +- **Simple lambda** (`lambda uid: f"u:{uid}"`) is **2.3× faster** than a named template — it skips signature inspection entirely +- **Hashing in the key** (`md5`, `sha256`) adds ~0.5 µs per call — use only when inputs are unbounded strings +- **Metrics** add ~0.4 µs per call; use `NULL_METRICS` (default) on ultra-hot paths ```bash -uv run pytest -q uv run python tests/benchmark.py +BENCH_N=500000 uv run python tests/benchmark.py ``` --- -## Use Cases - -* Web & API caching (FastAPI, Flask, Django) -* Database query caching -* SWR for upstream APIs -* Background refresh for configs & datasets -* Distributed caching with Redis -* Hybrid L1/L2 hot-path optimization - ---- +## Testing -## Comparison +```bash +uv pip install -e ".[dev,redis,tests]" -| Feature | advanced-caching | lru_cache | cachetools | Redis | Memcached | -| ------------------- | ---------------- | --------- | ---------- | ------ | --------- | -| TTL | ✅ | ❌ | ✅ | ✅ | ✅ | -| SWR | ✅ | ❌ | ❌ | Manual | Manual | -| Background refresh | ✅ | ❌ | ❌ | Manual | Manual | -| Custom backends | ✅ (InMem/Redis/S3/GCS/Chain) | ❌ | ❌ | N/A | N/A | -| Distributed | ✅ (Redis, ChainCache) | ❌ | ❌ | ✅ | ✅ | -| Multi-level chain | ✅ (ChainCache) | ❌ | ❌ | Manual | Manual | -| Dedupe writes | ✅ (Redis/S3/GCS opt-in) | ❌ | ❌ | Manual | Manual | -| Async support | ✅ | ❌ | ❌ | ✅ | ✅ | -| Type hints | ✅ | ✅ | ✅ | ❌ | ❌ | +uv run pytest -q # all unit tests +uv run pytest tests/test_integration_redis.py # Redis (requires Docker) +uv run pytest tests/test_s3_cache_integration.py # S3/GCS (docker-compose up) +``` ---- +Runnable examples: -## Contributing +```bash +uv run python 
examples/quickstart.py +uv run python examples/writer_reader.py +uv run python examples/serializers_example.py +uv run python examples/metrics_and_exporters.py +``` -1. Fork the repo -2. Create a feature branch -3. Add tests -4. Run `uv run pytest` -5. Open a pull request +📖 Full API reference, production patterns, and configuration: **[docs/guide.md](docs/guide.md)** --- ## License -MIT License – see [LICENSE](LICENSE). \ No newline at end of file + +MIT — see [LICENSE](LICENSE). diff --git a/docs/benchmarking-and-profiling.md b/docs/benchmarking-and-profiling.md deleted file mode 100644 index 5e14f6e..0000000 --- a/docs/benchmarking-and-profiling.md +++ /dev/null @@ -1,181 +0,0 @@ -# Benchmarking & Profiling - -This repo includes a small, reproducible benchmark harness and a profiler-friendly workload script. - -- Benchmark suite: `tests/benchmark.py` -- Profiler workload: `tests/profile_decorators.py` -- Benchmark log (append-only JSON-lines): `benchmarks.log` - -## 1) Benchmarking (step-by-step) - -### Step 0 — Ensure the environment is ready (uv) - -This repo uses `uv`. From the repo root: - -```bash -uv sync -``` - -### Step 1 — Run the benchmark suite - -```bash -uv run python tests/benchmark.py -``` - -What you get: -- Printed tables for **hot cache hits** (comparing TTLCache, SWRCache, BGCache). -- A new JSON entry appended to `benchmarks.log` with the config + median/mean/stdev per strategy. - -### Step 2 — Tune benchmark parameters (optional) - -`tests/benchmark.py` reads these environment variables: - -- `BENCH_SEED` (default `12345`) -- `BENCH_WORK_MS` (default `5.0`) — simulated I/O latency (sleep) -- `BENCH_WARMUP` (default `10`) -- `BENCH_RUNS` (default `300`) - -Examples: - -```bash -BENCH_RUNS=1000 uv run python tests/benchmark.py -``` - -### Step 3 — Compare two runs - -The benchmark appends JSON lines to `benchmarks.log`. 
A quick helper to list runs: - -```bash -uv run python - <<'PY' -import json -from pathlib import Path -runs=[] -if not Path('benchmarks.log').exists(): - print("No benchmarks.log found") - exit(0) -for line in Path('benchmarks.log').read_text(encoding='utf-8', errors='replace').splitlines(): - line=line.strip() - if not line.startswith('{'): - continue - try: - obj=json.loads(line) - except Exception: - continue - if isinstance(obj,dict) and 'sections' in obj: - runs.append(obj) -print('count',len(runs)) -for i,r in enumerate(runs): - print(i,r.get('ts')) -PY -``` - -To compare two indices (e.g., 2 vs 11), load the JSON objects in a notebook or script and diff the `sections` (hot medians for TTL/SWR/BG are the most sensitive to overhead changes). - -### Step 4 — Make results stable (recommended practice) - -- Run each benchmark **multiple times** and compare trends, not a single result. -- Prefer a quiet machine (close CPU-heavy apps). -- Compare runs with identical config (same `BENCH_*` values). - -## 2) Profiling with Scalene (step-by-step) - -### Step 0 — Install Scalene into the uv env - -If Scalene isn’t already available in your uv environment: - -```bash -uv pip install scalene -``` - -Scalene is useful to answer: “where is the CPU time going?” - -### Step 1 — Profile the benchmark itself (realistic) - -This includes the simulated `sleep` and will mostly show “time in system / sleeping”. -It’s useful for end-to-end sanity, but not for micro-optimizing the decorators. 
- -```bash -uv run python -m scalene --cli --reduced-profile --outfile scalene_benchmark.txt tests/benchmark.py -``` - -### Step 2 — Profile decorator overhead (recommended) - -Run the benchmark with no artificial sleep and more iterations: - -```bash -BENCH_WORK_MS=0 BENCH_RUNS=200000 BENCH_WARMUP=2000 BENCH_MIXED_RUNS=300000 \ - uv run python \ - -m scalene --cli --reduced-profile --profile-all --cpu --outfile scalene_overhead.txt \ - tests/benchmark.py -``` - -Notes: -- `--profile-all` includes imported modules (e.g., `src/advanced_caching/*.py`). -- `--reduced-profile` keeps output small and focused. - -### Step 3 — Profile tight loops (best for line-level hotspots) - -`tests/profile_decorators.py` is designed for profilers: -- It runs tight loops calling cached functions. -- It shuts down the BG scheduler at the end to reduce background-thread noise. - -```bash -PROFILE_N=5000000 \ - uv run python \ - -m scalene --cli --reduced-profile --profile-all --cpu --outfile scalene_profile.txt \ - tests/profile_decorators.py -``` - -Optional JSON output (handy for scripting): - -```bash -PROFILE_N=5000000 \ - uv run python \ - -m scalene --cli --json --outfile scalene_profile.json \ - tests/profile_decorators.py -``` - -## 3) What to look at (a practical checklist) - -### A) Benchmark output - -- **Hot path** - - `TTLCache` hot: overhead of key generation + `get()` + return. - - `SWRCache` hot: overhead of key generation + `get_entry()` + freshness checks. - - `BGCache` hot: overhead of key lookup + `get()` + return. - -- **Async results (important)** - - Async medians include the cost of creating/awaiting a coroutine and event-loop scheduling. - - For AsyncBG/AsyncSWR, compare against the `async_baseline` row (plain `await` with no cache) to estimate *cache-specific* overhead. - -- **Mixed path** - - A high mean + low median typically indicates occasional slow misses/refreshes. 
- -### B) Scalene output - -Look for time concentrated in: -- `src/advanced_caching/decorators.py` - - key building (template formatting) - - repeated `get_cache()` calls (should be minimized) - - SWR “fresh vs stale” checks -- `src/advanced_caching/storage.py` - - lock contention (`with self._lock:`) - - `time.time()` calls - - dict lookups (`self._data.get(key)`) - -Signals that often matter: -- Lots of time in `threading.py` / `Condition.wait` / `Thread.run` usually means background threads are running and being sampled. Prefer the tight-loop profiler script and/or make sure background work is shut down. - -## 4) Common pitfalls - -- Comparing benchmark runs with different configs (different `BENCH_*` values). -- Profiling with `BENCH_WORK_MS=5` and expecting line-level decorator hotspots (sleep dominates). -- Treating single-run noise as a regression (always repeat). - -## 5) Typical workflow - -1. Run `tests/benchmark.py` (default) a few times. -2. If you change code, re-run and compare with `tests/compare_benchmarks.py`. -3. If you need to optimize, profile with: - - `BENCH_WORK_MS=0` + `--profile-all` for imported modules - - `tests/profile_decorators.py` for clean line-level hotspots diff --git a/docs/bgcache.md b/docs/bgcache.md deleted file mode 100644 index 8f8bce6..0000000 --- a/docs/bgcache.md +++ /dev/null @@ -1,149 +0,0 @@ -# BGCache: Single-Writer / Multi-Reader (Production Example) - -This guide shows a production-grade split of BGCache writer and readers, including background refresh, error handling, and per-process reader caches. - -## Goals -- One writer per key (enforced) refreshing a shared cache (e.g., Redis or ChainCache). -- Many readers in different processes/threads pulling from the writer’s cache and keeping a local L1 warm. -- Graceful error handling, optional run-immediately load, and configurable intervals/TTLs. 
- -## Recommended Topology -- **Writer cache**: a shared backend (e.g., `RedisCache`, `ChainCache` with Redis+S3, or plain `InMemCache` if single-process). -- **Reader cache**: a fast local cache per process (e.g., `InMemCache`) that periodically pulls from the writer cache. - -## End-to-end Example (multiple writers/readers, object storage cold tier) - -```python -import logging -from advanced_caching import BGCache, InMemCache, RedisCache, ChainCache - -logger = logging.getLogger(__name__) - -# Shared writer cache: InMem L1 + Redis L2 + object storage L3 (S3/GCS/local file) -shared_writer_cache = ChainCache([ - (InMemCache(), 30), - (RedisCache(redis_client, dedupe_writes=True), 300), - # Choose one cold tier: - # (S3Cache(bucket="my-cache", dedupe_writes=True), 3600), - # (GCSCache(bucket="my-cache", dedupe_writes=True), 3600), - # (LocalFileCache("/var/tmp/bgcache", dedupe_writes=True), 3600), -]) - -# Writer 1: daily config -@BGCache.register_writer( - "daily_config", - interval_seconds=300, - ttl=None, - run_immediately=True, - on_error=lambda e: logger.error("daily_config writer failed", exc_info=e), - cache=shared_writer_cache, -) -def refresh_config(): - return load_config_from_db_or_api() - -# Writer 2: feature flags -@BGCache.register_writer( - "feature_flags", - interval_seconds=120, - ttl=None, - run_immediately=True, - on_error=lambda e: logger.error("feature_flags writer failed", exc_info=e), - cache=shared_writer_cache, -) -def refresh_flags(): - return load_flags_from_control_plane() - -# Readers: each process uses its own local cache and pulls from the writer cache -reader_local_cache = InMemCache() - -get_config = BGCache.get_reader( - "daily_config", - interval_seconds=60, - ttl=None, - run_immediately=True, - on_error=lambda e: logger.warning("daily_config reader pull failed", exc_info=e), - cache=shared_writer_cache, # source cache (writer’s cache, includes cold tier) -) - -get_flags = BGCache.get_reader( - "feature_flags", - 
interval_seconds=30, - ttl=None, - run_immediately=True, - on_error=lambda e: logger.warning("feature_flags reader pull failed", exc_info=e), - cache=shared_writer_cache, -) - -# Usage in app code -cfg = get_config() # from local reader cache; on miss pulls once from writer cache -flags = get_flags() # same pattern for feature flags -``` - -### Why this works well -- **Single writer enforced**: `register_writer` raises if the key is registered twice. -- **Background refresh**: writer schedules updates; readers schedule pulls from writer cache. -- **Local read performance**: readers serve from per-process `InMemCache`, reducing Redis/object-store round-trips. -- **Dedupe writes**: `dedupe_writes=True` on RedisCache avoids redundant writes (and refreshes TTL when unchanged). - -### Tuning knobs -- `interval_seconds`: writer refresh period; reader pull period. Set to `0` to disable scheduling and rely on on-demand fetch. -- `ttl`: defaults to `interval_seconds * 2` when not provided. For readers, this is the local cache TTL. -- `run_immediately`: seed cache on startup if empty. -- `on_error`: handle/log exceptions from the writer refresh job. -- `cache`: use a distributed cache for the writer; for readers, this is the *source* cache they pull from, while they maintain their own local cache internally. - -### Async variants -- Both writer and reader functions can be `async def`; BGCache picks the appropriate scheduler (AsyncIOScheduler / BackgroundScheduler). The reader returned is sync callable but can call async sources when provided. - -### Using ChainCache for deeper hierarchies -- Cold tiers: S3Cache, GCSCache, LocalFileCache can sit behind Redis in ChainCache for durable or per-host persistence. - - S3/GCS: set `dedupe_writes=True` to avoid rewriting unchanged blobs (uses metadata hashes). - - LocalFileCache: per-host cache with atomic writes; useful when object storage isn’t available. - - Tune per-level TTL caps in the ChainCache tuples. 
- -## Operational tips -- Call `BGCache.shutdown()` in test teardown or graceful shutdown to stop schedulers. -- Keep `interval_seconds` moderately larger than your refresh latency to avoid overlaps. -- Monitor writer errors via `on_error`; consider alerts if refresh fails repeatedly. -- For high-QPS readers, keep `interval_seconds` small enough to ensure local caches stay warm. - -## Minimal test harness (pytest style) - -```python -import pytest -import asyncio -from advanced_caching import BGCache, InMemCache - -@pytest.mark.asyncio -async def test_bgcache_writer_reader(): - calls = {"n": 0} - writer_cache = InMemCache() - - @BGCache.register_writer("demo", interval_seconds=0.05, cache=writer_cache) - def writer(): - calls["n"] += 1 - return {"v": calls["n"]} - - reader = BGCache.get_reader( - "demo", interval_seconds=0.05, cache=writer_cache, run_immediately=True - ) - - await asyncio.sleep(0.1) - v1 = reader() - assert v1 and v1["v"] >= 1 - - await asyncio.sleep(0.1) - v2 = reader() - assert v2 and v2["v"] >= v1["v"] - - BGCache.shutdown() -``` - -## Checklist for production -- [ ] Shared writer cache (Redis/ChainCache) sized and monitored -- [ ] Reader local caches sized appropriately -- [ ] `on_error` hooked for alerting -- [ ] Reasonable `interval_seconds` and `ttl` -- [ ] `BGCache.shutdown()` on service shutdown/tests -- [ ] Dedupe enabled where write amplification matters (Redis/S3/GCS) -- [ ] ChainCache tiers tuned (per-level TTL caps) diff --git a/docs/custom-metrics-exporters.md b/docs/custom-metrics-exporters.md deleted file mode 100644 index 8bc8c15..0000000 --- a/docs/custom-metrics-exporters.md +++ /dev/null @@ -1,49 +0,0 @@ -## Creating Your Own Exporter - -To create a custom exporter, implement the `MetricsCollector` protocol: - -```python -from advanced_caching.metrics import MetricsCollector -from typing import Any - -class MyCustomMetrics: - """Your custom metrics implementation.""" - - def record_hit(self, cache_name: str, key: str | None = 
None, metadata: dict[str, Any] | None = None) -> None: - # Your implementation - pass - - def record_miss(self, cache_name: str, key: str | None = None, metadata: dict[str, Any] | None = None) -> None: - pass - - def record_set(self, cache_name: str, key: str | None = None, value_size: int | None = None, metadata: dict[str, Any] | None = None) -> None: - pass - - def record_delete(self, cache_name: str, key: str | None = None, metadata: dict[str, Any] | None = None) -> None: - pass - - def record_latency(self, cache_name: str, operation: str, duration_seconds: float, metadata: dict[str, Any] | None = None) -> None: - pass - - def record_error(self, cache_name: str, operation: str, error_type: str, metadata: dict[str, Any] | None = None) -> None: - pass - - def record_memory_usage(self, cache_name: str, bytes_used: int, entry_count: int | None = None, metadata: dict[str, Any] | None = None) -> None: - pass - - def record_background_refresh(self, cache_name: str, success: bool, duration_seconds: float | None = None, metadata: dict[str, Any] | None = None) -> None: - pass -``` - -## Performance Tips - -2. **Batch writes**: For HTTP-based exporters, batch multiple metrics into single requests -3. **Async export**: Export metrics asynchronously to avoid blocking cache operations -4. **Sample rates**: For very high traffic, consider sampling (e.g., record 1 in 10 operations) -5. **Buffer metrics**: Collect metrics in memory and flush periodically - -## See Also - -- [Main Metrics Documentation](metrics.md) -- [GCP Cloud Monitoring](metrics.md#gcp-cloud-monitoring) -- [OpenTelemetry](metrics.md#opentelemetry) diff --git a/docs/guide.md b/docs/guide.md new file mode 100644 index 0000000..40c6106 --- /dev/null +++ b/docs/guide.md @@ -0,0 +1,1215 @@ +# advanced-caching — Production Guide + +> Python ≥ 3.10 · async-native · type-safe · pluggable backends + +--- + +## Table of Contents + +1. [Core Concepts](#1-core-concepts) +2. [`@cache` Reference](#2-cache-reference) +3. 
[`@bg` Reference](#3-bg-reference) +4. [Storage Backends](#4-storage-backends) +5. [Serializers](#5-serializers) +6. [Metrics & Observability](#6-metrics--observability) +7. [Key Generation](#7-key-generation) +8. [Production Patterns](#8-production-patterns) +9. [Performance Guide](#9-performance-guide) +10. [Configuration Reference](#10-configuration-reference) +11. [Examples](#11-examples) + +--- + +## 1. Core Concepts + +The library exposes **two symbols**: `cache` and `bg`. + +```python +from advanced_caching import cache, bg +``` + +### Three Caching Strategies + +```mermaid +flowchart LR + subgraph TTL["TTL @cache(ttl)"] + A[Request] --> B{fresh?} + B -- yes --> C[Return cached] + B -- no --> D[Call fn → store → return] + end + + subgraph SWR["SWR @cache(ttl, stale=N)"] + E[Request] --> F{fresh?} + F -- yes --> G[Return cached] + F -- stale --> H[Return stale\n+ bg refresh] + F -- dead --> I[Call fn → store → return] + end + + subgraph BG["Background @bg(interval)"] + J[Scheduler] -->|every N s| K[Call fn → store] + L[Request] --> M[cache.get → instant] + end +``` + +### TTL Lifecycle + +```mermaid +stateDiagram-v2 + [*] --> Missing : first request / invalidation + Missing --> Fresh : fn() called, result stored + Fresh --> Fresh : cache hit (no fn call) + Fresh --> Stale : ttl elapsed (SWR only) + Stale --> Fresh : background refresh completed + Stale --> Missing : stale+ttl elapsed (dead) + Fresh --> Missing : .invalidate() / .clear() +``` + +### SWR Time Windows + +```mermaid +gantt + title SWR Key Lifecycle (ttl=60s, stale=30s) + dateFormat s + axisFormat t=%ss + + section Entry state + Fresh — served from cache, no fn call :active, 0, 60 + Stale — served immediately + bg refresh : 60, 30 + Dead — blocks caller to refresh :crit, 90, 30 +``` + +### Background Refresh Architecture + +```mermaid +sequenceDiagram + participant Scheduler + participant Cache + participant Fn as Decorated fn + participant Caller + + Scheduler->>Fn: trigger every N seconds + 
Fn-->>Cache: cache.set(key, result, ttl) + + Caller->>Cache: cache.get(key) + Cache-->>Caller: value (instant, sub-μs) + + note over Caller,Cache: Caller never waits for Fn +``` + +--- + +## 2. `@cache` Reference + +### Signature + +```python +cache( + ttl: int | float, + *, + key: str | Callable, + stale: int | float = 0, + store: CacheStorage | type | Callable | None = None, + metrics: MetricsCollector | None = None, +) +``` + +| Parameter | Type | Default | Notes | +|-----------|------|---------|-------| +| `ttl` | `int \| float` | required | `0` = bypass cache entirely | +| `key` | `str \| Callable` | required | Template or callable key factory | +| `stale` | `int \| float` | `0` | SWR window length (seconds). `> 0` enables SWR | +| `store` | backend | `None` → `InMemCache()` | Instance, class, or factory callable | +| `metrics` | `MetricsCollector` | `None` | Any `MetricsCollector` implementation | + +### TTL Cache + +```python +@cache(60, key="user:{user_id}") +async def get_user(user_id: int) -> dict: + return await db.fetch_user(user_id) + +# Works identically for sync functions: +@cache(60, key="config:{env}") +def get_config(env: str) -> dict: + return load_from_file(env) +``` + +### Stale-While-Revalidate + +```python +@cache(60, stale=30, key="price:{symbol}") +async def get_price(symbol: str) -> float: + return await exchange_api.fetch(symbol) +``` + +- `t < 60s` → cache hit, no fn call +- `60s < t < 90s` → return stale value instantly, trigger background refresh +- `t > 90s` → entry dead, block caller, refresh synchronously + +### Invalidation + +Every decorated function gets two methods: + +```python +# Delete a specific cache entry (same args as the decorated fn): +await get_user.invalidate(42) # deletes "user:42" +get_config.invalidate("prod") # deletes "config:prod" + +# Wipe everything in the store: +get_user.clear() +``` + +### Bypass Cache + +```python +@cache(0, key="debug:{x}") # ttl=0 → always call fn, never store +def uncached(x: int) -> 
int: ... +``` + +### Custom Store Factory + +Pass a callable (called once per decoration) to create a fresh store per function: + +```python +from advanced_caching import cache, InMemCache + +@cache(60, key="fn1:{x}", store=InMemCache) # class → new instance +@cache(60, key="fn2:{x}", store=lambda: InMemCache()) # factory +def compute(x: int) -> int: ... +``` + +--- + +## 3. `@bg` Reference + +`@bg` decouples the refresh cycle entirely from request handlers. +Every call is a local cache read — the function never blocks the caller. + +### Signature + +```python +bg( + interval: int | float, # seconds between refreshes + *, + key: str, # cache key (no template placeholders for bg) + ttl: int | float | None = None, + store: CacheStorage | type | Callable | None = None, + metrics: MetricsCollector | None = None, + on_error: Callable[[Exception], None] | None = None, + run_immediately: bool = True, +) +``` + +### Basic Usage + +```python +@bg(300, key="feature_flags") +async def load_flags() -> dict: + return await remote_config.fetch() + +flags = await load_flags() # instant after first call +``` + +### Sync Functions + +```python +@bg(60, key="db_stats") +def collect_stats() -> dict: + return db.execute("SELECT count(*) FROM users").fetchone() + +stats = collect_stats() +``` + +### Error Handling + +```python +import logging + +@bg(60, key="rates", on_error=lambda e: logging.error("refresh failed: %s", e)) +async def refresh_rates() -> dict: + return await forex_api.fetch() +``` + +If `on_error` is not set, exceptions are logged at WARNING level and the stale value is kept. 
+ +### `bg.write` / `bg.read` — Multi-Process Pattern + +```mermaid +flowchart TD + subgraph WP["Worker Process (one per cluster)"] + BW["@bg.write(interval, key, store=redis)"] + BW -->|every N s| FN["refresh fn()"] + FN --> RD[(Redis\nShared Store)] + end + + subgraph WEB1["Web Process A"] + BR1["bg.read(key, interval=30, store=redis)"] + BR1 -->|sync every 30s| RD + BR1 --> MC1[("Private\nInMemCache")] + MC1 -->|sub-μs| REQ1[Request handler] + end + + subgraph WEB2["Web Process B"] + BR2["bg.read(key, interval=30, store=redis)"] + BR2 -->|sync every 30s| RD + BR2 --> MC2[("Private\nInMemCache")] + MC2 -->|sub-μs| REQ2[Request handler] + end + + style RD fill:#f90,color:#000 +``` + +#### `bg.write` + +```python +bg.write( + interval: int | float, + *, + key: str, + ttl: int | float | None = None, + store: CacheStorage | None = None, # shared backend (Redis) + metrics: MetricsCollector | None = None, + on_error: Callable | None = None, + run_immediately: bool = True, +) +``` + +- **One writer per key per process** — raises `ValueError` on duplicate registration. +- `metrics=` tracks `background_refresh` success/failure + latency. + +```python +@bg.write(60, key="exchange_rates", store=redis_store, metrics=metrics) +async def refresh_rates() -> dict: + return await forex_api.fetch_all() +``` + +#### `bg.read` + +```python +bg.read( + key: str, + *, + interval: int | float = 0, + ttl: int | float | None = None, + store: CacheStorage | None = None, # None → auto-discover writer's store (same process) + metrics: MetricsCollector | None = None, + on_error: Callable | None = None, + run_immediately: bool = True, +) -> Callable[[], Any] +``` + +- Returns a **callable** — call it to get the current value from the local mirror. +- Each call to `bg.read()` creates an **independent** private local cache. +- `store=None` → auto-discovers the writer's store if `bg.write(key=…)` was called in the same process. 
+ +```python +# Same process as writer → auto-discovers redis_store +get_rates = bg.read("exchange_rates") +rates = get_rates() # local dict lookup, never touches Redis + +# Different process → must provide the store explicitly +get_rates = bg.read("exchange_rates", interval=30, store=redis_store) +``` + +#### `bg.shutdown` + +```python +bg.shutdown(wait=True) +``` + +Stops all background schedulers. Register at app shutdown: + +```python +import atexit +atexit.register(bg.shutdown) +``` + +--- + +## 4. Storage Backends + +### Protocol + +All backends implement `CacheStorage`: + +```python +class CacheStorage(Protocol): + def get(self, key: str) -> Any | None: ... + def set(self, key: str, value: Any, ttl: int | float) -> None: ... + def delete(self, key: str) -> None: ... + def exists(self, key: str) -> bool: ... + def get_entry(self, key: str) -> CacheEntry | None: ... + def set_entry(self, key: str, entry: CacheEntry) -> None: ... + def set_if_not_exists(self, key: str, value: Any, ttl: int | float) -> bool: ... + def get_many(self, keys: list[str]) -> dict[str, Any]: ... + def set_many(self, items: dict[str, Any], ttl: int | float) -> None: ... + def clear(self) -> None: ... +``` + +### Backend Comparison + +```mermaid +flowchart LR + Q{Deployment type?} + Q --> SP[Single process] + Q --> MP[Multi-process] + Q --> BIG[Large objects] + Q --> DISK[Local persistence] + + SP --> InMem[InMemCache\n~10M ops/s] + MP --> Redis[RedisCache\n~50k ops/s net-bound] + MP --> Chain["ChainCache\nL1 InMem + L2 Redis"] + BIG --> S3["S3Cache / GCSCache"] + DISK --> LF[LocalFileCache] + Chain --> InMem2[L1 hit: ~6M ops/s] + Chain --> Redis2[L2 hit: network RTT] +``` + +### `InMemCache` + +Thread-safe, lock-free hot path (GIL atomicity on `dict.get`). + +```python +from advanced_caching import InMemCache + +store = InMemCache() + +@cache(60, key="user:{id}", store=store) +def get_user(id: int) -> dict: ... 
+``` + +### `RedisCache` + +```python +import redis +from advanced_caching import RedisCache, serializers + +r = redis.from_url("redis://localhost:6379", decode_responses=False) + +store = RedisCache( + r, + prefix="myapp:", # key namespace + serializer=serializers.msgpack, # optional — default: pickle +) + +@cache(3600, key="catalog:{page}", store=store) +async def get_catalog(page: int) -> list: ... +``` + +Connection pooling (recommended): + +```python +pool = redis.ConnectionPool.from_url("redis://localhost", max_connections=20) +r = redis.Redis(connection_pool=pool, decode_responses=False) +store = RedisCache(r, prefix="app:") +``` + +### `ChainCache` — N-Level Read-Through + +```mermaid +sequenceDiagram + participant C as Caller + participant L1 as L1 InMemCache + participant L2 as L2 RedisCache + participant FN as Source fn + + C->>L1: get(key) + alt L1 hit + L1-->>C: value (sub-μs) + else L1 miss + L1->>L2: get(key) + alt L2 hit + L2-->>L1: value (backfill L1) + L1-->>C: value + else L2 miss + L2->>FN: call fn() + FN-->>L2: result (backfill L2) + L2-->>L1: result (backfill L1) + L1-->>C: result + end + end +``` + +```python +from advanced_caching import ChainCache, InMemCache, RedisCache + +chain = ChainCache.build( + InMemCache(), + RedisCache(r, prefix="v1:"), + ttls=[60, 3600], # L1 TTL, L2 TTL +) + +@cache(3600, key="item:{id}", store=chain) +async def get_item(id: int) -> dict: ... +``` + +Three or more levels: + +```python +three_tier = ChainCache.build(l1, l2, l3, ttls=[60, 3600, 86400]) +``` + +### `HybridCache` + +Convenience wrapper: L1 in-memory + L2 Redis with configurable TTLs. + +```python +from advanced_caching import HybridCache + +hybrid = HybridCache( + l1_ttl=60, + l1_cache=InMemCache(), + l2_ttl=3600, + l2_cache=RedisCache(r), +) +``` + +### `LocalFileCache` + +Per-host disk persistence. Entries are gzip-compressed blobs. 
+ +```python +from advanced_caching import LocalFileCache, serializers + +store = LocalFileCache( + "/var/cache/myapp", + serializer=serializers.json, # optional +) +``` + +### `S3Cache` / `GCSCache` + +For large objects, ML artifacts, or cheap durable caching. + +```python +from advanced_caching import S3Cache, GCSCache, serializers + +# AWS S3 +s3 = S3Cache(bucket="myapp-cache", prefix="v1/", serializer=serializers.msgpack) + +# Google Cloud Storage +gcs = GCSCache(bucket="myapp-cache", prefix="v1/", serializer=serializers.json) + +@cache(86400, key="ml_features:{entity_id}", store=s3) +async def get_features(entity_id: str) -> dict: ... +``` + +--- + +## 5. Serializers + +### Pipeline + +```mermaid +flowchart LR + V[Python value] --> SE[Serializer.dumps] + SE --> HDR["16-byte header\nfresh_until + created_at"] + HDR --> BYTES[Raw bytes] + BYTES --> STORE[(Backend\nRedis/S3/File)] + + STORE --> LOAD[Serializer.loads] + LOAD --> V2[Python value] + + style HDR fill:#eef,stroke:#99f +``` + +The wire format is always: `[8-byte float: fresh_until][8-byte float: created_at][serialized value]`. +This is metadata-agnostic — any serializer works without needing a schema for the cache entry header. 
+ +### Built-in Serializers + +```python +from advanced_caching import serializers + +serializers.json # orjson (default) — fastest for JSON-serializable data +serializers.pickle # any Python object, no schema required +serializers.msgpack # compact binary (requires pip install msgpack) +serializers.protobuf(MyProtoClass) # Protocol Buffers (requires protobuf) +``` + +### Usage + +```python +from advanced_caching import RedisCache, LocalFileCache, serializers + +# JSON-safe data (dicts, lists, primitives) +redis_json = RedisCache(r, serializer=serializers.json) + +# Arbitrary Python (dataclasses, custom objects) +redis_pickle = RedisCache(r, serializer=serializers.pickle) + +# Compact binary (large payloads, best compression) +redis_msgpack = RedisCache(r, serializer=serializers.msgpack) + +# Protobuf (schema-enforced, cross-language) +redis_proto = RedisCache(r, serializer=serializers.protobuf(MyProto)) +``` + +### Custom Serializer + +Implement two methods — that's all: + +```python +class MySerializer: + def dumps(self, value: object) -> bytes: ... + def loads(self, data: bytes) -> object: ... + +store = RedisCache(r, serializer=MySerializer()) +``` + +--- + +## 6. Metrics & Observability + +### Architecture + +```mermaid +flowchart LR + DEC["@cache / @bg"] --> IS[InstrumentedStorage] + IS --> STORE[(Backend)] + IS --> MC[MetricsCollector] + MC --> IMM[InMemoryMetrics] + MC --> PROM[PrometheusMetrics] + MC --> OTEL[OpenTelemetryMetrics] + MC --> GCP[GCPCloudMonitoringMetrics] + MC --> CUSTOM[Your own] + + style MC fill:#ffd,stroke:#aa0 +``` + +### `InMemoryMetrics` + +```python +from advanced_caching import InMemoryMetrics + +metrics = InMemoryMetrics() + +@cache(60, key="user:{uid}", metrics=metrics) +async def get_user(uid: int) -> dict: ... + +@bg(300, key="flags", metrics=metrics) +async def load_flags() -> dict: ... 
 + +stats = metrics.get_stats() +``` + +`get_stats()` returns a structured dict — every section is keyed by `cache_name` (the decorated function's `__name__`, or the `InstrumentedStorage` label you choose): + +```python +{ + "uptime_seconds": 12.3, + + # per-function hit/miss counters + "caches": { + "get_user": { + "hits": 120, "misses": 5, "sets": 5, "deletes": 0, + "hit_rate_percent": 96.0 + } + }, + + # per-function, per-operation latency percentiles (ms) + "latency": { + "get_user.get": {"count": 125, "p50_ms": 0.01, "p95_ms": 0.05, "p99_ms": 0.12, "avg_ms": 0.02}, + "get_user.set": {"count": 5, "p50_ms": 0.02, "p95_ms": 0.08, "p99_ms": 0.11, "avg_ms": 0.03} + }, + + # errors keyed as "<cache_name>.<operation>": {"<error_type>": count} + "errors": {}, + + # optional memory snapshot (if backend reports it) + "memory": { + "get_user": {"bytes": 4096, "entries": 5, "mb": 0.004} + }, + + # @bg background refresh success/failure counts + "background_refresh": { + "flags": {"success": 12, "failure": 0} + } +} +``` + +### ChainCache — per-layer metrics + +Wrapping the whole chain with one `InstrumentedStorage` only gives you totals. +Wrap **each layer individually** to get per-tier breakdown: + +```python +from advanced_caching import ChainCache, InMemCache, RedisCache, S3Cache, InMemoryMetrics +from advanced_caching.storage.utils import InstrumentedStorage + +m = InMemoryMetrics() + +chain = ChainCache.build( + InstrumentedStorage(InMemCache(), m, "L1:inmem"), # ← named per layer + InstrumentedStorage(RedisCache(r), m, "L2:redis"), + InstrumentedStorage(S3Cache(s3, "bkt"), m, "L3:s3"), + ttls=[60, 300, 3600], +) + +@cache(3600, key="catalog:{page}", store=chain) +async def get_catalog(page: int) -> list: ... 
+``` + +`m.get_stats()["caches"]` then shows hit rates per tier — so you can immediately see whether your L1 is sized correctly or whether most traffic is falling through to Redis/S3: + +``` +Layer hits misses sets hit_rate +----------- ---- ------ ---- -------- +L1:inmem 87 5 5 94% +L2:redis 4 1 1 80% +L3:s3 1 0 0 100% +``` + +> **Reading the table**: a healthy setup has almost all hits at L1. If L2/L3 hit rates are high it means L1 is evicting too early — raise its TTL or increase its size. + +### Custom Metrics Collector + +Implement the `MetricsCollector` protocol: + +```python +class MyMetrics: + def record_hit(self, cache_name: str, key: str | None = None, metadata=None): ... + def record_miss(self, cache_name: str, key: str | None = None, metadata=None): ... + def record_set(self, cache_name: str, key: str | None = None, value_size: int | None = None, metadata=None): ... + def record_delete(self, cache_name: str, key: str | None = None, metadata=None): ... + def record_latency(self, cache_name: str, operation: str | None = None, duration_seconds: float | None = None, metadata=None): ... + def record_error(self, cache_name: str, operation: str | None = None, error_type: str | None = None, metadata=None): ... + def record_memory_usage(self, cache_name: str, bytes_used: int | None = None, entry_count: int | None = None, metadata=None): ... + def record_background_refresh(self, cache_name: str, success: bool | None = None, duration_seconds: float | None = None, metadata=None): ... +``` + +### NULL_METRICS + +Zero-overhead no-op for development or when metrics are disabled: + +```python +from advanced_caching.metrics import NULL_METRICS + +@cache(60, key="fast:{x}", metrics=NULL_METRICS) +def fast_fn(x: int) -> int: ... 
+``` + +### Prometheus / OpenTelemetry / GCP + +```python +# Prometheus (pip install prometheus_client) +from advanced_caching.exporters import PrometheusMetrics +metrics = PrometheusMetrics(namespace="myapp", subsystem="cache") + +# OpenTelemetry (pip install opentelemetry-api) +from advanced_caching.exporters import OpenTelemetryMetrics +metrics = OpenTelemetryMetrics(meter_name="myapp.cache") + +# GCP Cloud Monitoring (pip install google-cloud-monitoring) +from advanced_caching.exporters import GCPCloudMonitoringMetrics +metrics = GCPCloudMonitoringMetrics(project_id="my-project") + +@cache(60, key="user:{uid}", metrics=metrics) +async def get_user(uid: int) -> dict: ... +``` + +--- + +## 7. Key Generation + +### Template Styles + +```mermaid +flowchart TD + K[key= parameter] --> S{Style?} + S -- "static string" --> STATIC["'feature_flags'\n→ 'feature_flags'"] + S -- "positional {}" --> POS["'user:{}'\n→ 'user:42' (first arg)"] + S -- "named {name}" --> NAMED["'user:{user_id}'\n→ 'user:42' (by kwarg name)"] + S -- "multi named" --> MULTI["'order:{user_id}:{order_id}'\n→ 'order:1:99'"] + S -- "callable" --> CALL["key=lambda uid, **_: f'u:{uid}'\n→ 'u:42'"] +``` + +### Performance by Key Style + +| Style | Example | Throughput | +|-------|---------|-----------| +| Static | `key="flags"` | ~16 M ops/s | +| Positional `{}` | `key="user:{}"` | ~7 M ops/s | +| Named `{name}` | `key="user:{user_id}"` | ~2 M ops/s | +| Multi-named | `key="order:{uid}:{oid}"` | ~2 M ops/s | +| Callable | `key=lambda u: f"u:{u}"` | varies | + +### Examples + +```python +# Static — zero resolution cost +@cache(60, key="feature_flags") +async def load_flags() -> dict: ... + +# Positional — first argument only +@cache(60, key="user:{}") +async def get_user(user_id: int) -> dict: ... + +# Named — resolved by parameter name +@cache(60, key="order:{user_id}:{order_id}") +async def get_order(user_id: int, order_id: int) -> dict: ... 
+ +# Callable — full Python, no format string limits +@cache(60, key=lambda uid, role: f"user:{role}:{uid}") +async def get_user_by_role(uid: int, role: str) -> dict: ... +``` + +### Callable Key Patterns + +A callable receives the **exact same `*args, **kwargs`** as the decorated function. Use it when string templates aren't enough: + +```python +# 1. Multi-arg tenant isolation +@cache(60, key=lambda tenant, resource_id: f"{tenant}:res:{resource_id}") +async def get_resource(tenant: str, resource_id: int) -> dict: ... + +# 2. Conditional prefix (e.g. admin vs public namespace) +@cache(60, key=lambda resource_id, admin=False: ("admin" if admin else "public") + f":res:{resource_id}") +async def get_protected(resource_id: int, admin: bool = False) -> dict: ... + +# 3. Hash long/arbitrary inputs (raw SQL, long query strings) +import hashlib +def _query_key(query: str) -> str: + return "query:" + hashlib.sha256(query.encode()).hexdigest()[:16] + +@cache(30, key=_query_key) +async def run_query(query: str) -> list: ... + +# 4. Variadic — pick value from positional or keyword +@cache(300, key=lambda *a, **k: f"i18n:{k.get('lang', a[0] if a else 'en')}") +async def get_translations(lang: str = "en") -> dict: ... + +# 5. Invalidation works identically — callable computes the key to delete +@cache(60, key=lambda uid: f"u:{uid}") +def get_user(uid: int) -> dict: ... + +get_user.invalidate(42) # deletes key "u:42" +get_user.clear() # wipes entire store +``` + +> **Performance**: a simple lambda key skips signature inspection and runs at **~4 M ops/s** — roughly 2.3× faster than a named template (`~1.7 M ops/s`). Avoid calling expensive operations (network, hashing) in the key unless necessary. + +--- + +## 8. 
Production Patterns + +### Pattern 1 — FastAPI with Redis + Metrics + +```python +from contextlib import asynccontextmanager +import redis +from fastapi import FastAPI +from advanced_caching import cache, bg, RedisCache, ChainCache, InMemCache, InMemoryMetrics + +# ── Infrastructure ──────────────────────────────────────────────────────────── +pool = redis.ConnectionPool.from_url("redis://localhost", max_connections=20) +r = redis.Redis(connection_pool=pool, decode_responses=False) +redis_store = RedisCache(r, prefix="app:") +tiered = ChainCache.build(InMemCache(), redis_store, ttls=[60, 3600]) +metrics = InMemoryMetrics() + + +# ── Cache decorators ────────────────────────────────────────────────────────── +@cache(300, key="user:{user_id}", store=tiered, metrics=metrics) +async def get_user(user_id: int) -> dict: + return await db.fetch_user(user_id) + + +@bg(60, key="feature_flags", store=redis_store, metrics=metrics) +async def load_flags() -> dict: + return await remote_config.fetch() + + +# ── Lifespan ────────────────────────────────────────────────────────────────── +@asynccontextmanager +async def lifespan(app: FastAPI): + yield + bg.shutdown() + + +app = FastAPI(lifespan=lifespan) + + +@app.get("/users/{user_id}") +async def user_endpoint(user_id: int): + return await get_user(user_id) + + +@app.get("/metrics") +async def metrics_endpoint(): + return metrics.get_stats() +``` + +### Pattern 2 — Writer / Reader (Multi-Process) + +```mermaid +sequenceDiagram + participant WP as Worker Process + participant Redis + participant Web1 as Web Process 1 + participant Web2 as Web Process 2 + + WP->>WP: @bg.write(60, key="rates", store=redis) + loop every 60s + WP->>WP: refresh_rates() + WP->>Redis: set("rates", data, ttl=120) + end + + Web1->>Web1: bg.read("rates", interval=30, store=redis) + loop every 30s + Web1->>Redis: get("rates") + Redis-->>Web1: data → local InMemCache + end + + Web2->>Web2: bg.read("rates", interval=30, store=redis) + loop every 30s + 
Web2->>Redis: get("rates") + Redis-->>Web2: data → local InMemCache + end + + Note over Web1,Web2: Request handlers call local cache only (sub-μs) +``` + +```python +# ── worker.py ───────────────────────────────────────────────────────────────── +import redis +from advanced_caching import bg, RedisCache, InMemoryMetrics + +r = redis.from_url(REDIS_URL, decode_responses=False) +shared = RedisCache(r, prefix="shared:") +metrics = InMemoryMetrics() + +@bg.write(60, key="exchange_rates", store=shared, metrics=metrics) +async def refresh_rates() -> dict: + return await forex_api.fetch_all() + + +# ── web.py ──────────────────────────────────────────────────────────────────── +import redis +from advanced_caching import bg, RedisCache + +r = redis.from_url(REDIS_URL, decode_responses=False) +shared = RedisCache(r, prefix="shared:") + +# Each reader has its own private local cache — no interference between readers +get_rates = bg.read("exchange_rates", interval=30, store=shared) + +@app.get("/rates") +async def rates_endpoint(): + return get_rates() # always a local dict lookup, sub-microsecond +``` + +### Pattern 3 — Three-Tier Cache (InMem + Redis + S3) + +```mermaid +flowchart LR + REQ[Request] --> L1[L1\nInMemCache\n60s TTL] + L1 -- hit --> RES[Response] + L1 -- miss --> L2[L2\nRedisCache\n1h TTL] + L2 -- hit --> BF1[Backfill L1] + BF1 --> RES + L2 -- miss --> L3[L3\nS3Cache\n24h TTL] + L3 -- hit --> BF2[Backfill L2 + L1] + BF2 --> RES + L3 -- miss --> FN[Source fn] + FN --> STORE[Store all tiers] + STORE --> RES +``` + +```python +from advanced_caching import cache, ChainCache, InMemCache, RedisCache, S3Cache + +l1 = InMemCache() +l2 = RedisCache(redis.from_url(REDIS_URL, decode_responses=False), prefix="v1:") +l3 = S3Cache(bucket="myapp-cache", prefix="v1/") + +three_tier = ChainCache.build(l1, l2, l3, ttls=[60, 3600, 86400]) + +@cache(86400, key="ml_features:{entity_id}", store=three_tier) +async def get_features(entity_id: str) -> dict: + return await 
feature_store.fetch(entity_id)
+```
+
+### Pattern 4 — Django / Sync Application
+
+```python
+from django.http import JsonResponse
+from advanced_caching import cache, InMemCache, InMemoryMetrics
+
+metrics = InMemoryMetrics()
+
+@cache(300, key="product:{product_id}", metrics=metrics)
+def get_product(product_id: int) -> dict:
+    return Product.objects.values().get(pk=product_id)
+
+
+def product_view(request, product_id):
+    product = get_product(product_id)
+    return JsonResponse(product)
+```
+
+### Pattern 5 — Conditional Caching (TTL by result)
+
+```python
+@cache(0, key="order:{order_id}")  # ttl=0 → bypass by default
+def get_order(order_id: int) -> dict:
+    order = db.fetch_order(order_id)
+    if order["status"] == "completed":
+        # Cache completed orders indefinitely
+        get_order.store.set(f"order:{order_id}", order, ttl=86400)
+    return order
+```
+
+---
+
+## 9. Performance Guide
+
+### Throughput by Operation
+
+```mermaid
+xychart-beta horizontal
+    title "Throughput (M ops/s, Python 3.12, Apple M2, N=200k)"
+    x-axis ["bg.read local", "InMemCache.get", "@cache sync static", "@cache async static", "@cache callable λ", "@cache SWR stale", "@cache + metrics"]
+    y-axis "M ops/s" 0 --> 12
+    bar [7.5, 10.3, 6.0, 4.9, 3.9, 2.9, 1.6]
+```
+
+### Hot Path Breakdown (`@cache` sync hit, 100k iterations)
+
+| Component | Time | % |
+|-----------|------|---|
+| `sync_wrapper` overhead | ~17 ms | 18% |
+| `InMemCache.get()` dict lookup | ~10 ms | 11% |
+| `_make_key_fn` (named key) | ~59 ms | 64% |
+| `time.time()` syscall (×1) | ~6 ms | 7% |
+
+> **Key insight**: Named key templates (`"user:{user_id}"`) are the single biggest overhead.
+> Use static keys where possible: `"feature_flags"` is 2.7× faster than `"flags:{name}"`.
+
+### Optimization Checklist
+
+```mermaid
+flowchart TD
+    START[Optimizing?] 
--> K{Key style} + K -- named --> STATIC["Use static key or {} → +2.7×"] + K -- ok --> S{SWR needed?} + S -- no --> TTLONLY["ttl-only path — no get_entry overhead"] + S -- yes --> STYPE{stale window tuned?} + STYPE --> CHAIN{Store type} + CHAIN -- single process --> INMEM[InMemCache — fastest] + CHAIN -- multi process --> REDIS[RedisCache + connection pool] + CHAIN -- hot+warm --> CC[ChainCache L1 InMem + L2 Redis] + INMEM --> METRICS{Metrics needed?} + METRICS -- no --> NULL[Use NULL_METRICS] + METRICS -- yes --> INMM[InMemoryMetrics — low overhead] +``` + +### Built-in Optimizations + +- **Lock-free reads** in `InMemCache` — GIL guarantees `dict.get` atomicity; lock only on stale eviction. +- **TTL vs SWR code paths split at decoration time** — no runtime `if stale > 0` branch per call. +- **Single `time.time()` call** per cache hit (not two). +- **`__slots__`** on `InMemCache` — eliminates per-instance `__dict__` overhead. + +### Profiling Your Code + +```bash +# cProfile +uv run python -m cProfile -s cumulative tests/profile_decorators.py + +# Scalene (line-level CPU + memory) +uv pip install scalene +uv run scalene tests/profile_decorators.py + +# py-spy (sampling, no instrumentation overhead) +py-spy record -o profile.svg -- python tests/profile_decorators.py +``` + +### Benchmarks + +```bash +uv run python tests/benchmark.py +BENCH_N=500000 uv run python tests/benchmark.py +``` + +--- + +## 10. Configuration Reference + +### `@cache` Parameters + +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `ttl` | `int \| float` | **required** | Time-to-live in seconds. `0` = bypass. | +| `key` | `str \| Callable` | **required** | Key template or callable. | +| `stale` | `int \| float` | `0` | SWR window. `> 0` enables stale-while-revalidate. | +| `store` | backend | `None` → `InMemCache()` | Instance, class, or factory callable. | +| `metrics` | `MetricsCollector` | `None` | Any metrics collector. 
| + +### `@bg` Parameters + +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `interval` | `int \| float` | **required** | Seconds between refreshes. | +| `key` | `str` | **required** | Cache key (no template placeholders). | +| `ttl` | `int \| float \| None` | `None` → `interval * 2` | TTL of stored entry. | +| `store` | backend | `None` → `InMemCache()` | Cache backend. | +| `metrics` | `MetricsCollector` | `None` | Metrics collector. | +| `on_error` | `Callable[[Exception], None]` | logs warning | Called on refresh error. | +| `run_immediately` | `bool` | `True` | Populate cache before first request. | + +### `bg.write` / `bg.read` Parameters + +| Parameter | `bg.write` | `bg.read` | Description | +|-----------|-----------|----------|-------------| +| `key` | **required** | **required** | Cache key. | +| `interval` | **required** | `0` | Seconds between refreshes. | +| `ttl` | `None` | `None` | Entry TTL. | +| `store` | `None` → `InMemCache()` | `None` → auto-discover | Backend. | +| `metrics` | `None` | `None` | Metrics collector. | +| `on_error` | `None` | `None` | Error callback. | +| `run_immediately` | `True` | `True` | Run at registration. 
|
+
+### Storage Backends
+
+| Backend | Constructor | Serializer | Extra dep |
+|---------|-------------|-----------|-----------|
+| `InMemCache` | `InMemCache()` | n/a (stores Python objects) | none |
+| `RedisCache` | `RedisCache(r, prefix=, serializer=)` | optional | `[redis]` |
+| `ChainCache` | `ChainCache.build(*stores, ttls=[…])` | per backend | none |
+| `HybridCache` | `HybridCache(l1_ttl=, l1_cache=, l2_ttl=, l2_cache=)` | per backend | none |
+| `LocalFileCache` | `LocalFileCache(dir, serializer=)` | optional | none |
+| `S3Cache` | `S3Cache(bucket=, prefix=, serializer=)` | optional | `[s3]` |
+| `GCSCache` | `GCSCache(bucket=, prefix=, serializer=)` | optional | `[gcs]` |
+
+### Serializers
+
+| Serializer | Symbol | Best for | Extra dep |
+|-----------|--------|---------|-----------|
+| orjson (default) | `serializers.json` | JSON-safe data | none (bundled) |
+| pickle | `serializers.pickle` | Any Python object | none |
+| msgpack | `serializers.msgpack` | Large payloads | `[msgpack]` |
+| protobuf | `serializers.protobuf(Cls)` | Cross-language schemas | `[protobuf]` |
+| custom | `MySerializer()` | Anything | — |
+
+### Pattern Decision Tree
+
+```mermaid
+flowchart TD
+    Q{What are you caching?}
+
+    Q --> A[Function with args]
+    Q --> B[Zero-arg background data]
+    Q --> C[Cross-process shared data]
+
+    A --> D{"Stale data\nacceptable?"}
+    D -- no --> TTL["@cache(ttl, key=…)"]
+    D -- yes --> SWR["@cache(ttl, stale=N, key=…)"]
+
+    B --> BG["@bg(interval, key=…)"]
+
+    C --> WR["bg.write + bg.read"]
+    WR --> SAME{Same process?}
+    SAME -- yes --> AUTO["bg.read(key) — auto-discovers store"]
+    SAME -- no --> EXPLICIT["bg.read(key, store=redis_store)"]
+```
+
+---
+
+## 11. Examples
+
+All runnable examples live in `examples/`. Each is self-contained and executable with:
+
+```bash
+uv run python examples/<example_name>.py
+```
+
+### `quickstart.py`
+
+The fastest way to see every feature in one script. 
+ +| Section | What it shows | +|---------|--------------| +| **TTL Cache** | `@cache(ttl, key="user:{user_id}")` — miss, hit, second key | +| **SWR** | `@cache(ttl, stale=N)` — serve stale + background refresh | +| **Background refresh** | `@bg(interval, key=)` — zero-latency reads | +| **Custom store** | `store=InMemCache()` (swap for `RedisCache` in prod) | +| **Metrics** | Shared `InMemoryMetrics`, `get_stats()` hit rates | +| **Invalidation** | `.invalidate(key)` and `.clear()` | +| **Callable keys** | 5 patterns: simple λ, multi-arg, conditional, hash, varargs | + +```bash +uv run python examples/quickstart.py +``` + +--- + +### `metrics_and_exporters.py` + +Deep dive into metrics — how to read the output, custom collectors, and per-layer ChainCache observability. + +| Section | What it shows | +|---------|--------------| +| **Shared `InMemoryMetrics`** | One collector across multiple functions; `get_stats()` table with hit rates and latency percentiles (p50/p95/p99) | +| **Custom `PrintMetrics`** | Minimal protocol implementation — logs every hit/miss to stdout | +| **`NULL_METRICS`** | Zero-overhead no-op; throughput comparison | +| **ChainCache per-layer** | Wrap each layer (L1:inmem, L2:redis, L3:s3) with `InstrumentedStorage`; watch hits/misses move up the chain as layers fill and evict | + +Sample output for the ChainCache section: + +``` +[cold start — all layers empty] +Layer hits misses sets hit_rate +----------- ----- ------ ---- -------- +L1:inmem 0 2 2 0% +L2:redis 0 2 2 0% +L3:s3 0 2 2 0% + +[L1 evicted — requests fall through to L2] +L1:inmem 2 4 4 33% +L2:redis 2 2 2 50% +L3:s3 0 2 2 0% +``` + +```bash +uv run python examples/metrics_and_exporters.py +``` + +--- + +### `serializers_example.py` + +Benchmarks the four serializer strategies on a `LocalFileCache` backend (disk I/O — Redis/InMem would be faster, making the serializer overhead even more visible). 
+ +| Serializer | When to use | +|-----------|------------| +| `serializers.json` (orjson) | Default — fastest for JSON-safe data | +| `serializers.pickle` | Any Python object, no schema | +| `serializers.msgpack` | Large payloads — ~2× more compact than JSON | +| Custom `MySerializer` | Protobuf, Avro, Arrow, or any `dumps`/`loads` pair | + +```bash +uv run python examples/serializers_example.py +``` + +--- + +### `writer_reader.py` + +Demonstrates the **Single-Writer / Multi-Reader** pattern for sharing data across processes (or threads) with zero per-read latency. + +``` +Writer refreshes every 100 ms; readers poll from private mirrors. + +[writer] refreshed → {'USD': 1.0, 'EUR': 0.92, 'GBP': 0.79, 'ts': 1710...} +tick 1: fast_reader={'USD': 1.0, ...} slow_reader={'USD': 1.0, ...} +tick 2: ... +``` + +- `bg.write(interval, key=, store=redis_store)` — one writer, runs on a schedule +- `bg.read(key, interval=, store=redis_store)` — each reader gets a private local mirror, refreshed independently +- Readers **never block** — they return the last known value from their local copy + +```bash +uv run python examples/writer_reader.py +``` diff --git a/docs/metrics.md b/docs/metrics.md deleted file mode 100644 index 1476d38..0000000 --- a/docs/metrics.md +++ /dev/null @@ -1,246 +0,0 @@ -# Metrics Collection - -Optional metrics system with <1% overhead. Tracks hits, misses, latency, errors, and background refreshes. 
- -## Installation - -```bash - -uv pip install "advanced-caching" # Includes InMemoryMetrics -pip install "advanced-caching[opentelemetry]" # OpenTelemetry -uv pip install "advanced-caching[gcp-monitoring]" # GCP Cloud Monitoring -``` - -## Quick Start - -```python -from advanced_caching import TTLCache -from advanced_caching.metrics import InMemoryMetrics - -metrics = InMemoryMetrics() # Share across multiple functions - -@TTLCache.cached("user:{id}", ttl=60, metrics=metrics) -def get_user(id: int): - return {"id": id} - -# Query stats -stats = metrics.get_stats() -# Returns: hits, misses, hit_rate, latency percentiles, errors, memory, background_refresh -``` - -## Metrics Reference - -All metrics collectors track the following operations and expose them through their respective backends. - -| Metric Name | Type | What It Represents | When Recorded | Use Case | Labels/Dimensions | -|-------------|------|-------------------|---------------|----------|-------------------| -| **`cache.hits`** | Counter | Number of times data was successfully retrieved from cache without executing the underlying function | Every time a cache lookup finds valid (non-expired) data | Calculate cache effectiveness. High hit count indicates good cache utilization | `cache_name`, `operation` (always "get") | -| **`cache.misses`** | Counter | Number of times data was not found in cache or was expired, requiring function execution | When cache lookup fails (key not found or TTL expired) | Identify cold cache scenarios or TTL tuning needs. High miss rate may indicate TTL is too short | `cache_name`, `operation` (always "get") | -| **`cache.sets`** | Counter | Number of times data was written to cache after function execution | After the underlying function completes successfully and result is stored | Track cache write operations. 
Should roughly equal misses in normal operation | `cache_name`, `operation` (always "set") | -| **`cache.deletes`** | Counter | Number of explicit cache entry removals (not TTL expirations) | When cache entries are manually deleted or evicted by cache policy | Monitor cache invalidation patterns. Debug cache coherency issues | `cache_name`, `operation` (always "delete") | -| **`cache.hit_rate_percent`** | Gauge (Calculated) | Percentage of cache lookups that resulted in hits: `(hits / (hits + misses)) * 100` | Calculated on-demand (InMemoryMetrics) or periodically (exporters) | **Primary effectiveness metric.** Target: >80% for most apps, >95% for read-heavy workloads. Values: `95.5` = 95.5% from cache, `50.0` = half hit/miss, `0.0` = cold cache | `cache_name` | -| **`cache.operation.duration`** | Histogram/Timer | Time spent in cache operations (get, set, delete) in milliseconds. Provides p50, p95, p99, avg aggregations | For every cache operation, wrapping the storage backend call | Detect storage backend performance issues. Compare local vs remote cache (Redis, S3, GCS). **Example:** `get_p50_ms: 0.12` = fast in-memory, `get_p99_ms: 45.0` = 1% take up to 45ms (network spike?) | `cache_name`, `operation` (get/set/delete) | -| **`cache.errors`** | Counter | Number of errors encountered during cache operations | When cache operations raise exceptions (network failures, serialization errors, Redis connection issues) | Alert on storage backend failures. Identify problematic cache keys. Monitor Redis connection health. Breakdown by `error_type` (e.g., ConnectionError, TimeoutError) | `cache_name`, `operation`, `error_type` | -| **`cache.background_refresh`** | Counter (success/failure breakdown) | Number of background refresh operations for SWRCache (stale refresh) and BGCache (scheduled refresh) | **SWRCache:** When serving stale data triggers background refresh
**BGCache:** On every scheduled loader execution | Monitor SWR effectiveness (serving stale while updating). Track BGCache job reliability. High failure rate indicates unreliable data source, network issues, or function errors | `cache_name`, `status` (success/failure) | -| **`cache.memory.bytes`** | Gauge | Approximate memory usage of cached entries in bytes. Also provides `mb` (megabytes) and `entries` (item count) | Periodically or on-demand when using `InstrumentedStorage` wrapper | Prevent memory exhaustion in long-running processes. Size L1 cache appropriately in HybridCache. Trigger eviction at threshold | `cache_name` | -| **`cache.entry.count`** | Gauge | Number of entries currently stored in cache | Tracked alongside memory metrics | Monitor cache growth over time. Validate cache eviction policies. Estimate memory per entry (bytes / entries) | `cache_name` | - ---- - -## Metric Naming Conventions - -### InMemoryMetrics -Returns nested dictionary structure: -```json -{ - "uptime_seconds": 3600.5, - "caches": { - "get_user": { - "hits": 100, - "misses": 20, - "sets": 20, - "deletes": 5, - "hit_rate_percent": 83.33 - }, - "get_product": { - "hits": 50, - "misses": 10, - "sets": 10, - "deletes": 2, - "hit_rate_percent": 83.33 - } - }, - "latency": { - "get_user.get_p50_ms": 0.15, - "get_user.get_p95_ms": 2.5, - "get_user.get_p99_ms": 10.0, - "get_user.get_avg_ms": 0.8, - "get_product.get_p50_ms": 0.12, - "get_product.set_p50_ms": 1.2 - }, - "errors": { - "get_user.get": { - "ConnectionError": 5, - "TimeoutError": 2 - } - }, - "memory": { - "my_cache": { - "bytes": 1048576, - "mb": 1.0, - "entries": 100 - }, - "another_cache": { - "bytes": 524288, - "mb": 0.5, - "entries": 50 - } - }, - "background_refresh": { - "get_user": { - "success": 50, - "failure": 2 - } - } -} -``` - -**Note:** Metrics are tracked **per-cache-name** when using `InstrumentedStorage` wrapper. 
If you have multiple functions sharing the same metrics collector but using different storage backends, each will have its own memory entry under the cache name you provide to `InstrumentedStorage(storage, metrics, "cache_name")`. - -### OpenTelemetry -Metric names follow OpenTelemetry conventions: -- `cache.hits` (Counter with `cache_name` attribute) -- `cache.misses` (Counter with `cache_name` attribute) -- `cache.operation.duration` (Histogram with `cache_name`, `operation` attributes) - -### GCP Cloud Monitoring -Uses custom metric paths under your configured prefix: -- `custom.googleapis.com//hits` -- `custom.googleapis.com//misses` -- `custom.googleapis.com//latency` - -Labels: `cache_name`, `operation` - ---- - -## InMemoryMetrics - -Built-in collector for API endpoints. Zero external dependencies, thread-safe. - -```python -from fastapi import FastAPI - -app = FastAPI() -metrics = InMemoryMetrics() - -@app.get("/metrics") -async def get_metrics(): - return metrics.get_stats() -``` - -**Configuration:** -```python -metrics = InMemoryMetrics(max_latency_samples=1000) -metrics.reset() # Clear all stats -``` - -## Exporters - -### OpenTelemetry - -```python -from advanced_caching.exporters import OpenTelemetryMetrics -from opentelemetry import metrics -from opentelemetry.sdk.metrics import MeterProvider - -otel_metrics = OpenTelemetryMetrics(meter_name="myapp.cache") - -@TTLCache.cached("user:{id}", ttl=60, metrics=otel_metrics) -def get_user(id: int): - return {"id": id} -``` - -### GCP Cloud Monitoring - -```python -from advanced_caching.exporters import GCPCloudMonitoringMetrics - -gcp_metrics = GCPCloudMonitoringMetrics( - project_id="my-project", - metric_prefix="custom.googleapis.com/myapp/cache", - flush_interval=60.0, -) - -@TTLCache.cached("session:{id}", ttl=3600, metrics=gcp_metrics) -def get_session(id: str): - return {"id": id} -``` - -**Share client across collectors:** -```python -from google.cloud import monitoring_v3 - -client = 
monitoring_v3.MetricServiceClient() - -metrics1 = GCPCloudMonitoringMetrics(project_id="my-project", client=client) -metrics2 = GCPCloudMonitoringMetrics(project_id="my-project", client=client) -``` - -### Custom Exporters - -See [Custom Exporters Guide](custom-metrics-exporters.md) for Prometheus, StatsD, and Datadog examples. - -## Advanced Usage - -### Shared Metrics Collector - -**Share one collector across all cached functions** (recommended): - -```python -metrics = InMemoryMetrics() - -@TTLCache.cached("user:{id}", ttl=60, metrics=metrics) -def get_user(id: int): - return {"id": id} - -@TTLCache.cached("product:{id}", ttl=300, metrics=metrics) -def get_product(id: int): - return {"id": id} - -# Per-function stats in single collector -stats = metrics.get_stats() -# stats["caches"]["get_user"] → user cache metrics -# stats["caches"]["get_product"] → product cache metrics -``` - -### Memory Monitoring - -```python -from advanced_caching.storage import InstrumentedStorage, InMemCache - -cache = InstrumentedStorage(InMemCache(), metrics, "my_cache") - -@TTLCache.cached("key:{id}", storage=cache, ttl=60) -def get_data(id: int): - return {"id": id} -``` - -### Conditional Metrics - -```python -import os -from advanced_caching.metrics import NULL_METRICS, InMemoryMetrics - -metrics = InMemoryMetrics() if os.getenv("ENV") == "production" else NULL_METRICS -``` - -## Performance - -<1% overhead for InMemoryMetrics. Use `NULL_METRICS` for zero overhead in development. 
- -## API Reference - -- [`metrics.py`](../src/advanced_caching/metrics.py) - Core metrics (InMemoryMetrics, NullMetrics) -- [`exporters/otel.py`](../src/advanced_caching/exporters/otel.py) - OpenTelemetry -- [`exporters/gcp.py`](../src/advanced_caching/exporters/gcp.py) - GCP Cloud Monitoring -- [Custom Exporters Guide](custom-metrics-exporters.md) - Prometheus, StatsD, Datadog examples \ No newline at end of file diff --git a/docs/object-storage-caching.md b/docs/object-storage-caching.md deleted file mode 100644 index 86993bf..0000000 --- a/docs/object-storage-caching.md +++ /dev/null @@ -1,291 +0,0 @@ -# Object Storage Caching (S3 & GCS) - -`advanced-caching` supports using cloud object storage (AWS S3 and Google Cloud Storage) as cache backends. This is ideal for: -- **Large datasets**: Storing large serialized objects that don't fit in Redis. -- **Cost efficiency**: Cheaper storage costs compared to managed Redis clusters. -- **Shared caching**: Sharing cache across different services or regions (with appropriate latency considerations). - -## Installation - -You need to install the respective client libraries: - -```bash -# For AWS S3 -pip install boto3 - -# For Google Cloud Storage -pip install google-cloud-storage -``` - -## S3Cache (AWS) - -`S3Cache` uses AWS S3 buckets for storage. It is optimized to minimize API costs by checking object metadata (HEAD request) before downloading the full body. - -### Basic Usage - -```python -import boto3 -from advanced_caching import S3Cache, TTLCache - -# Initialize Boto3 client (or let S3Cache create one) -s3_client = boto3.client("s3") - -# Create the cache backend -s3_cache = S3Cache( - bucket="my-app-cache-bucket", - prefix="prod/users/", - s3_client=s3_client, - serializer="json" # or "pickle" (default) -) - -# Use it with a decorator -@TTLCache.cached("user:{}", ttl=3600, cache=s3_cache) -def get_user_report(user_id): - # ... expensive operation ... 
- return generate_pdf_report(user_id) -``` - -### Configuration Options - -| Parameter | Description | Default | -|-----------|-------------|---------| -| `bucket` | Name of the S3 bucket. | Required | -| `prefix` | Folder prefix for keys (e.g., `cache/`). | `""` | -| `s3_client` | Pre-configured `boto3.client("s3")`. | `None` (creates new) | -| `serializer` | Serialization format (`"pickle"`, `"json"`, or custom). | `"pickle"` | -| `compress` | Enable Gzip compression for values. | `True` | -| `compress_level` | Gzip compression level (1-9). | `6` | - -## GCSCache (Google Cloud) - -`GCSCache` uses Google Cloud Storage buckets. Like `S3Cache`, it leverages metadata to check for freshness efficiently. - -### Basic Usage - -```python -from google.cloud import storage -from advanced_caching import GCSCache, TTLCache - -# Initialize GCS client -client = storage.Client() - -# Create the cache backend -gcs_cache = GCSCache( - bucket="my-app-cache-bucket", - prefix="reports/", - client=client, - compress=True -) - -@TTLCache.cached("report:{}", ttl=86400, cache=gcs_cache) -def generate_daily_report(date_str): - return complex_calculation(date_str) -``` - -## Key Organization & File Structure - -When using object storage, cache keys are mapped directly to file paths (object keys) in the bucket. The final path is constructed as: `prefix + key`. - -### Single Function - -```python -# Prefix acts as a folder -cache = S3Cache(bucket="my-bucket", prefix="reports/daily/") - -@TTLCache.cached("2023-10-25", ttl=3600, cache=cache) -def get_report(date): ... -``` - -**Resulting S3 Key:** `reports/daily/2023-10-25` - -### Multiple Functions (Shared Bucket) - -To store data from multiple functions in the same bucket, use different **prefixes** or distinct **key templates** to avoid collisions. - -#### Option A: Different Prefixes (Recommended) - -Create separate cache instances for different logical groups. 
This keeps the bucket organized and allows for easier cleanup (e.g., deleting the `users/` folder). - -```python -# Cache for User data -user_cache = S3Cache(bucket="my-bucket", prefix="users/") - -# Cache for Product data -product_cache = S3Cache(bucket="my-bucket", prefix="products/") - -@TTLCache.cached("{user_id}", ttl=300, cache=user_cache) -def get_user(user_id): ... -# File: users/123 - -@TTLCache.cached("{prod_id}", ttl=300, cache=product_cache) -def get_product(prod_id): ... -# File: products/ABC -``` - -#### Option B: Shared Prefix with Namespaced Keys - -Use a single cache instance but namespace the keys in the decorator. - -```python -# Shared cache instance -shared_cache = S3Cache(bucket="my-bucket", prefix="cache/") - -@TTLCache.cached("users:{user_id}", ttl=300, cache=shared_cache) -def get_user(user_id): ... -# File: cache/users:123 - -@TTLCache.cached("products:{prod_id}", ttl=300, cache=shared_cache) -def get_product(prod_id): ... -# File: cache/products:ABC -``` - -> **Tip**: You can use slashes in your key templates to create subfolders dynamically. -> Example: `@TTLCache.cached("users/{user_id}/profile", ...)` with prefix `v1/` results in `v1/users/123/profile`. - -### Single-writer / multi-reader with BGCache - -If you only want one place to refresh data but many places to read it, split BGCache into a writer and readers: - -```python -from advanced_caching import BGCache, InMemCache - -# One writer (enforced: only one writer per key) -@BGCache.register_writer( - "daily_config", interval_seconds=300, run_immediately=True, cache=InMemCache() -) -def refresh_config(): - return load_big_config() # expensive - -# Many readers; call-time readers without dummy decorators -get_config = BGCache.get_reader("daily_config", cache=InMemCache()) - -# On a miss the reader returns None (no fallback logic is attached). - -# You can also source from a multi-level cache (e.g., ChainCache) if you want object storage behind Redis/L1. 
-``` - -This pattern keeps writes centralized while allowing multiple call-sites to share the cached value. - -## Multi-level chain (InMem -> Redis -> S3/GCS) - -Use `ChainCache` to compose multiple storage layers: - -```python -from advanced_caching import InMemCache, RedisCache, S3Cache, ChainCache - -chain = ChainCache([ - (InMemCache(), 60), - (RedisCache(redis_client), 300), - (S3Cache(bucket="my-cache"), 3600), -]) - -# Write-through all levels (TTL capped per level) -chain.set("daily_config", load_config(), ttl=7200) - -# Read-through promotes to faster levels -cfg = chain.get("daily_config") -``` - -### Dedupe writes (optional) - -- `S3Cache(..., dedupe_writes=True)` stores a hash in object metadata (`ac-hash`) and skips uploads when content is unchanged (adds a HEAD check). -- `GCSCache(..., dedupe_writes=True)` stores `ac-hash` metadata and skips uploads when unchanged. -- `RedisCache(..., dedupe_writes=True)` skips rewriting identical payloads and refreshes TTL when provided. - -Use dedupe when bandwidth/object-write cost matters and an extra HEAD/reload is acceptable. - -## Best Practices - -### 1. Use HybridCache for Performance & Cost - -Object storage has higher latency (50-200ms) compared to Redis (<5ms) or memory (nanoseconds). It also charges per API request. - -To mitigate this, wrap your object storage cache in a `HybridCache`. This uses local memory as L1 and S3/GCS as L2. - -```python -from advanced_caching import HybridCache, InMemCache, S3Cache - -# L1: Memory (fast, free reads) -# L2: S3 (persistent, shared, slower) -hybrid_cache = HybridCache( - l1_cache=InMemCache(), - l2_cache=S3Cache(bucket="my-cache"), - l1_ttl=60, # Keep in memory for 1 minute - l2_ttl=86400 # Keep in S3 for 1 day -) - -# 1. First call: Miss L1 -> Miss L2 -> Run Function -> Write S3 -> Write L1 -# 2. Second call (0-60s): Hit L1 (Instant, no S3 cost) -# 3. Third call (61s+): Miss L1 -> Hit L2 (Slower, S3 read cost) -> Write L1 -``` - -### 2. 
Enable Compression - -Both `S3Cache` and `GCSCache` enable Gzip compression by default (`compress=True`). -- **Pros**: Reduces storage costs and network transfer time. -- **Cons**: Slight CPU overhead for compression/decompression. -- **Recommendation**: Keep it enabled unless you are storing already-compressed data (like images or zip files). - -### 3. Cost Optimization (Metadata Checks) - -`advanced-caching` implements a "Metadata First" strategy: -- **`get()`**: Checks object metadata (freshness timestamp) *before* downloading the body. If the item is expired, it aborts the download, saving data transfer costs. -- **`exists()`**: Uses `HEAD` requests (S3) or metadata lookups (GCS) which are cheaper and faster than downloading the object. - -### 4. Serialization - -- **Pickle (Default)**: Fastest and supports almost any Python object. **Security Warning**: Only use pickle if you trust the data source (i.e., your own bucket). -- **JSON**: Portable and human-readable. Use this if other non-Python services need to read the cache. Requires `orjson` (installed automatically with `advanced-caching`). - -### 5. Permissions - -Ensure your application has the correct IAM permissions. - -**AWS S3 (IAM Policy):** -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:GetObject", - "s3:PutObject", - "s3:DeleteObject", - "s3:ListBucket" - ], - "Resource": [ - "arn:aws:s3:::my-cache-bucket", - "arn:aws:s3:::my-cache-bucket/*" - ] - } - ] -} -``` - -**Google Cloud Storage:** -Ensure the Service Account has `Storage Object Admin` or `Storage Object User` roles on the bucket. - -## FAQ - -### Why not store all keys for a function in a single file? - -You might wonder if it's better to store all cached results for `get_user` in a single `users.json` file instead of thousands of small files. - -**This is generally NOT recommended for dynamic caching.** - -1. **Race Conditions**: Object storage does not support partial updates. 
To update one user, you must download the whole file, update the dict, and re-upload. If two requests happen simultaneously, one will overwrite the other's changes. -2. **Performance**: Reading a single key requires downloading the entire dataset. -3. **Cost**: Re-uploading a 10MB file to update a 1KB record incurs unnecessary bandwidth and request costs. - -**Exception: Read-Only Static Data** -If you have a dataset that is generated once (e.g., a daily export) and only read by your app, storing it as a single file is efficient. In this case, use `BGCache` to load the entire file into memory at once, rather than using `S3Cache` as a backend. - -```python -# Efficient for single-file read-only datasets -@BGCache.register_loader("daily_config", interval_seconds=3600) -def load_config(): - # Download big JSON once, keep in memory - obj = s3.get_object(Bucket="...", Key="config.json") - return json.loads(obj["Body"].read()) -``` diff --git a/examples/gcp_client_sharing_example.py b/examples/gcp_client_sharing_example.py deleted file mode 100644 index 442bb28..0000000 --- a/examples/gcp_client_sharing_example.py +++ /dev/null @@ -1,84 +0,0 @@ -""" -Example demonstrating GCP MetricServiceClient sharing across multiple metrics collectors. - -This shows: -1. Creating a single MetricServiceClient instance -2. Sharing it across multiple GCPCloudMonitoringMetrics collectors -3. Benefits: connection pooling, reduced resource usage - -Note: This example requires GCP credentials and won't run without them. - It's provided as a reference for production use. 
-""" - -# Uncomment to run (requires: pip install "advanced-caching[gcp-monitoring]") -""" -from advanced_caching import TTLCache, SWRCache -from advanced_caching.exporters import GCPCloudMonitoringMetrics -from google.cloud import monitoring_v3 - -# Create a single shared MetricServiceClient -# This reduces connection overhead and enables connection pooling -shared_client = monitoring_v3.MetricServiceClient() - -# Create separate metrics collectors for different services/namespaces -# All share the same underlying client connection -user_service_metrics = GCPCloudMonitoringMetrics( - project_id="my-gcp-project", - metric_prefix="custom.googleapis.com/users", - flush_interval=60.0, - client=shared_client, # Share client -) - -product_service_metrics = GCPCloudMonitoringMetrics( - project_id="my-gcp-project", - metric_prefix="custom.googleapis.com/products", - flush_interval=60.0, - client=shared_client, # Share client -) - -order_service_metrics = GCPCloudMonitoringMetrics( - project_id="my-gcp-project", - metric_prefix="custom.googleapis.com/orders", - flush_interval=60.0, - client=shared_client, # Share client -) - - -# User service functions -@TTLCache.cached("user:{id}", ttl=60, metrics=user_service_metrics) -def get_user(id: int): - return {"id": id, "name": f"User_{id}"} - - -# Product service functions -@TTLCache.cached("product:{id}", ttl=300, metrics=product_service_metrics) -def get_product(id: int): - return {"id": id, "name": f"Product_{id}"} - - -# Order service functions -@SWRCache.cached("order:{id}", ttl=120, stale_ttl=600, metrics=order_service_metrics) -def get_order(id: int): - return {"id": id, "status": "shipped"} - - -# Benefits of client sharing: -# 1. Single TCP connection pool shared across all collectors -# 2. Reduced memory footprint (one client vs multiple) -# 3. Better connection reuse and performance -# 4. Easier credential management (configure once) -# 5. 
All collectors still use shared APScheduler (no extra threads) - -print("GCP client sharing configured!") -print("- user_service_metrics → custom.googleapis.com/users/*") -print("- product_service_metrics → custom.googleapis.com/products/*") -print("- order_service_metrics → custom.googleapis.com/orders/*") -print("- All share one MetricServiceClient connection") -print("- All use shared APScheduler for background flushing") -""" - -print(__doc__) -print("\nTo use this pattern:") -print("1. Install: pip install 'advanced-caching[gcp-monitoring]'") -print("2. Set up GCP credentials") -print("3. Uncomment the code above") diff --git a/examples/metrics_and_exporters.py b/examples/metrics_and_exporters.py new file mode 100644 index 0000000..b1f1181 --- /dev/null +++ b/examples/metrics_and_exporters.py @@ -0,0 +1,243 @@ +""" +Metrics & exporters — complete guide. + +Covers: + 1. How InMemoryMetrics works and what get_stats() returns + 2. Shared collector across multiple functions + 3. Custom MetricsCollector (logger / Prometheus / etc.) + 4. NULL_METRICS for zero overhead + 5. Per-layer metrics with ChainCache (InMem → Redis → S3) + +Run: + uv run python examples/metrics_and_exporters.py +""" + +from __future__ import annotations + +import json +import time + +from advanced_caching import cache, InMemCache, ChainCache, InMemoryMetrics +from advanced_caching.metrics import NULL_METRICS +from advanced_caching.storage.utils import InstrumentedStorage + +# ── 1. 
How get_stats() output is structured ──────────────────────────────────── +# +# InMemoryMetrics.get_stats() returns: +# +# { +# "uptime_seconds": 12.3, +# "caches": { +# "": { +# "hits": 7, "misses": 3, "sets": 3, "deletes": 0, +# "hit_rate_percent": 70.0 +# } +# }, +# "latency": { +# ".get": {"count": 10, "p50_ms": 0.01, "p95_ms": 0.05, "p99_ms": 0.12, "avg_ms": 0.02} +# ".set": {"count": 3, "p50_ms": 0.02, ...} +# }, +# "errors": { +# ".get": {"RedisConnectionError": 2} +# }, +# "memory": { +# "": {"bytes": 4096, "entries": 3, "mb": 0.004} +# }, +# "background_refresh": { +# "": {"success": 15, "failure": 0} +# } +# } +# +# The "caches" key groups by function (or cache_name). "latency" shows +# per-operation percentiles (p50/p95/p99) measured in milliseconds. + + +# ── 2. Shared collector across multiple functions ───────────────────────────── + +metrics = InMemoryMetrics() + + +@cache(60, key="user:{user_id}", metrics=metrics) +def get_user(user_id: int) -> dict: + return {"id": user_id, "name": f"User{user_id}"} + + +@cache(300, key="product:{product_id}", metrics=metrics) +def get_product(product_id: int) -> dict: + return {"id": product_id, "price": 9.99} + + +@cache(3600, key="config:{}", metrics=metrics) +def get_config(key: str) -> dict: + return {"key": key, "value": "on"} + + +# ── 3. Custom MetricsCollector ──────────────────────────────────────────────── +# +# Implement any subset of the MetricsCollector protocol. +# Unused methods can be no-ops (`...`). + + +class PrintMetrics: + """Logs every cache hit/miss to stdout — useful for debugging.""" + + def record_hit(self, cache_name, key=None, metadata=None): + print(f" ✓ HIT {cache_name} key={key}") + + def record_miss(self, cache_name, key=None, metadata=None): + print(f" ✗ MISS {cache_name} key={key}") + + def record_set(self, cache_name, key=None, value_size=None, metadata=None): ... + def record_delete(self, cache_name, key=None, metadata=None): ... 
+ def record_latency( + self, cache_name, operation=None, duration_seconds=None, metadata=None + ): ... + def record_error( + self, cache_name, operation=None, error_type=None, metadata=None + ): ... + def record_memory_usage( + self, cache_name, bytes_used=None, entry_count=None, metadata=None + ): ... + def record_background_refresh( + self, cache_name, success=None, duration_seconds=None, metadata=None + ): ... + + +@cache(60, key="traced:{x}", metrics=PrintMetrics()) +def traced_fn(x: int) -> int: + return x * 2 + + +# ── 4. NULL_METRICS — zero overhead ─────────────────────────────────────────── +# +# Pass NULL_METRICS (or omit the metrics= arg entirely) on hot paths. +# Python optimises away the no-op calls. + + +@cache(60, key="fast:{}", metrics=NULL_METRICS) +def fast_fn(x: int) -> int: + return x + + +# ── 5. Per-layer ChainCache metrics ─────────────────────────────────────────── +# +# Wrap each layer with InstrumentedStorage *before* passing it to +# ChainCache.build(). Every layer gets its own cache_name, so get_stats() +# shows hits/misses/latency broken down by tier. +# +# Real production setup: +# L1 = InstrumentedStorage(InMemCache(), m, "L1:inmem") +# L2 = InstrumentedStorage(RedisCache(client), m, "L2:redis") +# L3 = InstrumentedStorage(S3Cache(s3, "bkt"), m, "L3:s3") +# chain = ChainCache.build(L1, L2, L3, ttls=[60, 300, 3600]) +# @cache(3600, key="catalog:{pg}", store=chain) +# +# Here we use three InMemCache instances as stand-ins for Redis and S3. 
+ +chain_metrics = InMemoryMetrics() + +_l1 = InstrumentedStorage(InMemCache(), chain_metrics, "L1:inmem") +_l2 = InstrumentedStorage(InMemCache(), chain_metrics, "L2:redis") +_l3 = InstrumentedStorage(InMemCache(), chain_metrics, "L3:s3") +chain = ChainCache.build(_l1, _l2, _l3, ttls=[60, 300, 3600]) + + +@cache(3600, key="catalog:{page}", store=chain) +def get_catalog(page: int) -> list: + return [{"id": i} for i in range(page * 10, page * 10 + 10)] + + +def _print_chain_stats() -> None: + stats = chain_metrics.get_stats() + caches = stats.get("caches", {}) + print( + f"\n {'Layer':<14} {'hits':>5} {'misses':>6} {'sets':>5} {'hit_rate':>9}" + ) + print(f" {'-' * 14} {'-' * 5} {'-' * 6} {'-' * 5} {'-' * 9}") + for layer in ("L1:inmem", "L2:redis", "L3:s3"): + s = caches.get(layer, {}) + hits = s.get("hits", 0) + misses = s.get("misses", 0) + sets = s.get("sets", 0) + hit_rate = s.get("hit_rate_percent", 0.0) + print(f" {layer:<14} {hits:>5} {misses:>6} {sets:>5} {hit_rate:>8.0f}%") + + +def main() -> None: + # ── Section 2: shared metrics ──────────────────────────────────────────── + print("\n=== 2. 
Shared InMemoryMetrics across functions ===") + + for uid in [1, 2, 1, 3, 1]: # uid=1 hits twice, uid=2/3 miss once each + get_user(uid) + for pid in [10, 10, 11, 10]: # pid=10 hits twice, pid=11 misses once + get_product(pid) + get_config("dark_mode") # miss + get_config("dark_mode") # hit + + stats = metrics.get_stats() + print(f"\n {'Function':<35} {'hits':>5} {'misses':>6} {'hit_rate':>9}") + print(f" {'-' * 35} {'-' * 5} {'-' * 6} {'-' * 9}") + for name, s in stats.get("caches", {}).items(): + print( + f" {name:<35} {s['hits']:>5} {s['misses']:>6}" + f" {s['hit_rate_percent']:>8.0f}%" + ) + + # Latency percentiles (p50 / p95 / p99) per operation + print(f"\n {'Operation':<40} {'p50 ms':>7} {'p95 ms':>7} {'p99 ms':>7}") + print(f" {'-' * 40} {'-' * 7} {'-' * 7} {'-' * 7}") + for op, lat in stats.get("latency", {}).items(): + print( + f" {op:<40} {lat['p50_ms']:>7.3f} {lat['p95_ms']:>7.3f}" + f" {lat['p99_ms']:>7.3f}" + ) + + # ── Section 3: custom collector ────────────────────────────────────────── + print("\n=== 3. Custom PrintMetrics (hit/miss logging) ===") + traced_fn(5) # miss + traced_fn(5) # hit + traced_fn(6) # miss + + # ── Section 4: NULL_METRICS ─────────────────────────────────────────────── + print("\n=== 4. NULL_METRICS (zero overhead) ===") + fast_fn(1) # prime + n = 500_000 + t0 = time.perf_counter() + for _ in range(n): + fast_fn(1) + elapsed = time.perf_counter() - t0 + print(f" {n / elapsed / 1e6:.2f}M ops/s (no metric overhead)") + + # ── Section 5: per-layer ChainCache metrics ─────────────────────────────── + print("\n=== 5. 
ChainCache per-layer metrics (L1:inmem → L2:redis → L3:s3) ===") + + print("\n [cold start — all layers empty]") + get_catalog(0) # L1 miss, L2 miss, L3 miss → fetch from fn, set at all layers + get_catalog(1) + _print_chain_stats() + + print("\n [warm — L1 has both pages]") + get_catalog(0) # L1 hit (no deeper lookup) + get_catalog(1) # L1 hit + _print_chain_stats() + + # Simulate L1 expiry by clearing it directly + _l1._storage.clear() + print("\n [L1 evicted — requests fall through to L2]") + get_catalog(0) # L1 miss, L2 hit → promotes back to L1 + get_catalog(1) # L1 miss, L2 hit → promotes back to L1 + _print_chain_stats() + + # Simulate both L1 and L2 evicted + _l1._storage.clear() + _l2._storage.clear() + print("\n [L1+L2 evicted — requests fall through to L3]") + get_catalog(0) # L1 miss, L2 miss, L3 hit → promotes to L1+L2 + _print_chain_stats() + + print("\n Full get_stats() snapshot (JSON):") + print(" " + json.dumps(chain_metrics.get_stats(), indent=4).replace("\n", "\n ")) + + +if __name__ == "__main__": + main() diff --git a/examples/metrics_example.py b/examples/metrics_example.py deleted file mode 100644 index b867acd..0000000 --- a/examples/metrics_example.py +++ /dev/null @@ -1,180 +0,0 @@ -""" -Example demonstrating metrics collection with advanced_caching. - -This example shows how to use metrics with different decorators and exporters. 
-""" - -import asyncio -import time -from advanced_caching import TTLCache, SWRCache, BGCache -from advanced_caching.storage import InMemCache - - -def example_basic_metrics(): - """Example using MockMetrics for testing.""" - print("=== Example 1: Basic Metrics Collection ===\n") - - # Create a simple metrics collector (for demo purposes) - class SimpleMetrics: - def __init__(self): - self.hits = 0 - self.misses = 0 - self.sets = 0 - - def record_hit(self, cache_name, key=None, metadata=None): - self.hits += 1 - print(f"✓ Cache HIT for {cache_name}") - - def record_miss(self, cache_name, key=None, metadata=None): - self.misses += 1 - print(f"✗ Cache MISS for {cache_name}") - - def record_set(self, cache_name, key=None, value_size=None, metadata=None): - self.sets += 1 - print(f"→ Cache SET for {cache_name}") - - def record_delete(self, cache_name, key=None, metadata=None): - pass - - def record_latency(self, cache_name, operation, duration_seconds, metadata=None): - print(f"⏱ {cache_name}.{operation} took {duration_seconds*1000:.2f}ms") - - def record_error(self, cache_name, operation, error_type, metadata=None): - print(f"⚠ {cache_name}.{operation} error: {error_type}") - - def record_memory_usage(self, cache_name, bytes_used, entry_count=None, metadata=None): - print(f"💾 {cache_name} using {bytes_used} bytes ({entry_count} entries)") - - def record_background_refresh(self, cache_name, success, duration_seconds=None, metadata=None): - status = "✓" if success else "✗" - print(f"{status} Background refresh for {cache_name}") - - metrics = SimpleMetrics() - - # Use metrics with TTLCache - @TTLCache.cached("user:{}", ttl=60, metrics=metrics) - def get_user(user_id: int): - time.sleep(0.1) # Simulate DB query - return {"id": user_id, "name": f"User{user_id}"} - - print("First call (cold cache):") - result = get_user(123) - print(f"Result: {result}\n") - - print("Second call (warm cache):") - result = get_user(123) - print(f"Result: {result}\n") - - print(f"Total stats: 
{metrics.hits} hits, {metrics.misses} misses, {metrics.sets} sets\n") - - -def example_memory_tracking(): - """Example tracking memory usage of in-memory cache.""" - print("=== Example 2: Memory Usage Tracking ===\n") - - from advanced_caching.storage import InstrumentedStorage - - class MemoryTracker: - def record_hit(self, *args, **kwargs): - pass - def record_miss(self, *args, **kwargs): - pass - def record_set(self, *args, **kwargs): - pass - def record_delete(self, *args, **kwargs): - pass - def record_latency(self, *args, **kwargs): - pass - def record_error(self, *args, **kwargs): - pass - def record_background_refresh(self, *args, **kwargs): - pass - - def record_memory_usage(self, cache_name, bytes_used, entry_count=None, metadata=None): - mb = bytes_used / (1024 * 1024) - print(f"💾 Cache '{cache_name}': {mb:.2f} MB ({entry_count} entries)") - - tracker = MemoryTracker() - cache = InMemCache() - instrumented = InstrumentedStorage(cache, tracker, "my_cache") - - # Add some data - for i in range(100): - instrumented.set(f"key_{i}", "x" * 10000, ttl=60) - - # Check memory usage - usage = instrumented.get_memory_usage() - print(f"Average entry size: {usage['avg_entry_size']} bytes\n") - - -async def example_prometheus_metrics(): - """Example using Prometheus metrics (requires prometheus_client).""" - print("=== Example 3: Prometheus Metrics (requires 'prometheus_client') ===\n") - - try: - from advanced_caching.exporters import PrometheusMetrics - - # Create Prometheus metrics collector - metrics = PrometheusMetrics(namespace="myapp", subsystem="cache") - - @TTLCache.cached("product:{}", ttl=300, metrics=metrics) - async def get_product(product_id: int): - await asyncio.sleep(0.05) - return {"id": product_id, "name": f"Product {product_id}"} - - # Generate some traffic - for i in range(5): - result = await get_product(i) - print(f"Fetched: {result}") - - # Cache hits - for i in range(3): - result = await get_product(i) - print(f"Cached: {result}") - - 
print("\n✓ Metrics are being collected by Prometheus") - print(" Run prometheus_client.start_http_server(8000) to expose metrics") - print(" Then visit http://localhost:8000/metrics\n") - - except ImportError: - print("⚠ prometheus_client not installed. Run: pip install 'advanced-caching[prometheus]'\n") - - -def example_null_metrics(): - """Example showing zero-overhead NullMetrics for development.""" - print("=== Example 4: Zero-Overhead NullMetrics ===\n") - - from advanced_caching.metrics import NULL_METRICS - - @TTLCache.cached("config:{}", ttl=3600, metrics=NULL_METRICS) - def get_config(env: str): - return {"env": env, "debug": True} - - # Metrics are completely disabled - zero overhead - result = get_config("dev") - print(f"Config: {result}") - print("✓ No metrics overhead (perfect for development)\n") - - -def main(): - """Run all examples.""" - print("=" * 60) - print("Advanced Caching Metrics Examples") - print("=" * 60 + "\n") - - # Synchronous examples - example_basic_metrics() - example_memory_tracking() - example_null_metrics() - - # Async examples - print("Running async examples...") - asyncio.run(example_prometheus_metrics()) - - print("=" * 60) - print("Examples completed!") - print("=" * 60) - - -if __name__ == "__main__": - main() diff --git a/examples/quickstart.py b/examples/quickstart.py new file mode 100644 index 0000000..0def6b0 --- /dev/null +++ b/examples/quickstart.py @@ -0,0 +1,207 @@ +""" +Quick-start examples for advanced-caching. + +Covers: TTL, SWR, bg refresh, Redis, ChainCache, invalidation, serializers, metrics. + +Run: + uv run python examples/quickstart.py +""" + +from __future__ import annotations + +import asyncio +import time + +# ── 1. 
TTL Cache ───────────────────────────────────────────────────────────── +from advanced_caching import cache, InMemCache + + +@cache(60, key="user:{user_id}") +async def get_user(user_id: int) -> dict: + print(f" [db] fetching user {user_id}") + await asyncio.sleep(0.01) + return {"id": user_id, "name": f"User{user_id}"} + + +# ── 2. Stale-While-Revalidate ──────────────────────────────────────────────── +@cache(0.05, stale=10, key="price:{symbol}") +async def get_price(symbol: str) -> float: + print(f" [api] fetching {symbol}") + return 100.0 + len(symbol) + + +# ── 3. Background refresh ──────────────────────────────────────────────────── +from advanced_caching import bg + + +@bg(0.1, key="flags") +def load_flags() -> dict: + print(" [bg] refreshing flags") + return {"dark_mode": True, "v": time.time()} + + +# ── 4. Custom store (InMemCache used here; swap for RedisCache in production) ─ +custom_store = InMemCache() + + +@cache(120, key="catalog:{page}", store=custom_store) +async def get_catalog(page: int) -> list: + return [{"id": i} for i in range(page * 10, page * 10 + 10)] + + +# ── 5. Metrics ─────────────────────────────────────────────────────────────── +from advanced_caching import InMemoryMetrics + +metrics = InMemoryMetrics() + + +@cache(60, key="product:{}", metrics=metrics) +async def get_product(product_id: int) -> dict: + await asyncio.sleep(0.01) + return {"id": product_id, "price": 9.99} + + +# ── 6. Invalidation ────────────────────────────────────────────────────────── +@cache(60, key="session:{sid}") +def get_session(sid: str) -> dict: + return {"sid": sid, "valid": True} + + +# ── 7. Callable Keys ────────────────────────────────────────────────────────── +# A callable receives the same *args/**kwargs as the decorated function. +# Use when you need conditional namespacing, hashing, or complex composition. +import hashlib + + +# 7a. 
Simple lambda — mirrors a named-placeholder template but with full Python +@cache(60, key=lambda user_id: f"user:v2:{user_id}") +async def get_user_v2(user_id: int) -> dict: + return {"id": user_id, "source": "callable-key"} + + +# 7b. Multi-argument — combine tenant + resource for namespace isolation +@cache(60, key=lambda tenant, resource_id: f"{tenant}:resource:{resource_id}") +async def get_resource(tenant: str, resource_id: int) -> dict: + return {"tenant": tenant, "id": resource_id} + + +# 7c. Conditional key — different prefix for admin vs public access +@cache( + 60, + key=lambda resource_id, admin=False: ( + ("admin" if admin else "public") + f":res:{resource_id}" + ), +) +async def get_protected(resource_id: int, admin: bool = False) -> dict: + return {"id": resource_id, "admin": admin} + + +# 7d. Hash long / complex inputs (e.g. raw SQL query strings) +def _query_key(query: str) -> str: + digest = hashlib.sha256(query.encode()).hexdigest()[:16] + return f"query:{digest}" + + +@cache(30, key=_query_key) +async def run_query(query: str) -> list: + print(f" [db] running query: {query[:40]}…") + return [{"row": 1}] + + +# 7e. Variadic args — pick lang from positional or keyword +@cache(300, key=lambda *a, **k: f"i18n:{k.get('lang', a[0] if a else 'en')}") +async def get_translations(lang: str = "en") -> dict: + return {"lang": lang, "hello": "hello"} + + +# ── Runner ─────────────────────────────────────────────────────────────────── +async def main(): + print("\n=== 1. TTL Cache ===") + u = await get_user(1) + print(f" miss → {u}") + u = await get_user(1) + print(f" hit → {u}") + u = await get_user(2) + print(f" miss → {u}") + + print("\n=== 2. Stale-While-Revalidate ===") + p = await get_price("BTC") + print(f" miss → {p}") + await asyncio.sleep(0.06) # go stale + p = await get_price("BTC") + print(f" stale → {p} (background refresh triggered)") + await asyncio.sleep(0.05) + p = await get_price("BTC") + print(f" fresh → {p}") + + print("\n=== 3. 
Background refresh ===") + f = load_flags() + print(f" first → {f}") + await asyncio.sleep(0.25) + f = load_flags() + print(f" after bg refresh → {f}") + bg.shutdown() + + print("\n=== 4. Custom store ===") + c = await get_catalog(0) + print(f" page 0: {c[:2]}…") + c = await get_catalog(0) + print(f" page 0 (cached): {c[:2]}…") + + print("\n=== 5. Metrics ===") + for i in range(3): + await get_product(i) + for i in range(3): + await get_product(i) # all hits + stats = metrics.get_stats() + for name, s in stats.get("caches", {}).items(): + print( + f" {name}: {s['hits']} hits, {s['misses']} misses, {s['hit_rate_percent']:.0f}% hit rate" + ) + + print("\n=== 6. Invalidation ===") + get_session("abc") + get_session("abc") # hit + get_session.invalidate("abc") + s = get_session("abc") # miss again + print(f" after invalidate → {s}") + get_session.clear() + print(" store cleared") + + print("\n=== 7. Callable Keys ===") + # 7a. simple lambda + u = await get_user_v2(1) + u_hit = await get_user_v2(1) + print(f" lambda key miss → {u}") + print(f" lambda key hit → {u_hit}") + + # 7b. multi-arg tenant isolation + r_a = await get_resource("acme", 42) + r_b = await get_resource("beta", 42) # different key → independent cache + print(f" tenant acme → {r_a}") + print(f" tenant beta → {r_b} (separate cache entry)") + + # 7c. conditional prefix + pub = await get_protected(7) + adm = await get_protected(7, admin=True) # different key → independent cache + print(f" public → {pub}") + print(f" admin → {adm} (separate cache entry)") + + # 7d. hash key + q = "SELECT * FROM orders WHERE status = 'pending' AND created_at > '2024-01-01'" + rows = await run_query(q) + rows_hit = await run_query(q) + print(f" hash key miss → {rows}") + print(f" hash key hit → {rows_hit}") + + # 7e. 
variadic (positional or keyword) + t1 = await get_translations("fr") + t2 = await get_translations(lang="de") + t3 = await get_translations() # default "en" + print(f" lang=fr → {t1}") + print(f" lang=de → {t2}") + print(f" default → {t3}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/serializers_example.py b/examples/serializers_example.py new file mode 100644 index 0000000..abe2a60 --- /dev/null +++ b/examples/serializers_example.py @@ -0,0 +1,121 @@ +""" +Serializers example — orjson (default), msgpack, pickle, and custom. + +Serializers apply to backends that store bytes externally: RedisCache, +LocalFileCache, S3Cache, GCSCache. InMemCache stores Python objects directly +and needs no serialization. + +Run: + uv run python examples/serializers_example.py +""" + +from __future__ import annotations + +import tempfile +import time +from advanced_caching import cache +from advanced_caching import serializers +from advanced_caching.storage import LocalFileCache + +TMPDIR = tempfile.mkdtemp(prefix="ac_ser_") + + +# ── orjson (default) — fastest for JSON-serializable data ──────────────────── +json_store = LocalFileCache(TMPDIR + "/json", serializer=serializers.json) + + +@cache(60, key="json:{x}", store=json_store) +def compute_json(x: int) -> dict: + return {"x": x, "sq": x * x} + + +# ── Pickle — arbitrary Python objects, no schema ───────────────────────────── +pickle_store = LocalFileCache(TMPDIR + "/pkl", serializer=serializers.pickle) + + +@cache(60, key="pickle:{x}", store=pickle_store) +def compute_pickle(x: int) -> dict: + return {"x": x, "sq": x * x} + + +# ── MsgPack — compact binary, ~2× faster than JSON for large payloads ───────── +try: + serializers.msgpack.dumps({"test": 1}) # probe before creating store + msgpack_store = LocalFileCache(TMPDIR + "/msgpack", serializer=serializers.msgpack) + + @cache(60, key="msgpack:{x}", store=msgpack_store) + def compute_msgpack(x: int) -> dict: + return {"x": x, "sq": x * x} + + 
HAS_MSGPACK = True +except (ImportError, Exception): + HAS_MSGPACK = False + + +# ── Custom serializer ───────────────────────────────────────────────────────── +import json as _json + + +class CompactJson: + """Minimal custom serializer using stdlib json.""" + + def dumps(self, v: object) -> bytes: + return _json.dumps(v, separators=(",", ":")).encode() + + def loads(self, b: bytes) -> object: + return _json.loads(b) + + +custom_store = LocalFileCache(TMPDIR + "/custom", serializer=CompactJson()) + + +@cache(60, key="custom:{x}", store=custom_store) +def compute_custom(x: int) -> dict: + return {"x": x, "sq": x * x} + + +# ── Also show: RedisCache serializer usage (comment to run) ────────────────── +# import redis as _redis +# r = _redis.from_url("redis://localhost:6379", decode_responses=False) +# from advanced_caching import RedisCache +# redis_json_store = RedisCache(r, prefix="ser:", serializer=serializers.json) +# redis_msgpack = RedisCache(r, prefix="mp:", serializer=serializers.msgpack) + + +def bench(fn, n=10_000) -> float: + fn(1) # prime (write to disk) + fn(1) # warm (read from cache) + t0 = time.perf_counter() + for _ in range(n): + fn(1) + return time.perf_counter() - t0 + + +def main(): + print("\n=== Serializer Throughput Comparison (10k cached hits) ===\n") + print(" Backend: LocalFileCache (disk I/O; Redis/InMem would be faster)\n") + + results: dict[str, float] = { + "orjson (default)": bench(compute_json), + "pickle ": bench(compute_pickle), + "custom json ": bench(compute_custom), + } + if HAS_MSGPACK: + results["msgpack "] = bench(compute_msgpack) + else: + print( + " (msgpack not installed — skip: pip install 'advanced-caching[msgpack]')\n" + ) + + for name, elapsed in sorted(results.items(), key=lambda x: x[1]): + ops = 10_000 / elapsed + print(f" {name} {ops / 1e3:>6.1f}k ops/s ({elapsed * 1000:.0f} ms total)") + + print() + print(" Note: serializer choice matters most for Redis/S3/GCS backends.") + print(" InMemCache stores Python objects 
directly — no serialization overhead.") + print() + + +if __name__ == "__main__": + main() diff --git a/examples/shared_metrics_example.py b/examples/shared_metrics_example.py deleted file mode 100644 index 0214d6d..0000000 --- a/examples/shared_metrics_example.py +++ /dev/null @@ -1,66 +0,0 @@ -""" -Example demonstrating shared metrics collectors across multiple cached functions. - -This shows: -1. Single InMemoryMetrics collector shared across multiple functions -2. Each function's metrics tracked separately by cache_name -3. Exposing metrics via API endpoint -""" - -from advanced_caching import TTLCache, SWRCache -from advanced_caching.metrics import InMemoryMetrics -import json - -# Create a single shared metrics collector -metrics = InMemoryMetrics() - -# Multiple cached functions sharing the same metrics collector -@TTLCache.cached("user:{id}", ttl=60, metrics=metrics) -def get_user(id: int): - print(f" → Cache miss: fetching user {id} from database...") - return {"id": id, "name": f"User_{id}", "role": "admin"} - -@TTLCache.cached("product:{id}", ttl=300, metrics=metrics) -def get_product(id: int): - print(f" → Cache miss: fetching product {id} from database...") - return {"id": id, "name": f"Product_{id}", "price": 99.99} - -@SWRCache.cached("config:{key}", ttl=120, stale_ttl=600, metrics=metrics) -def get_config(key: str): - print(f" → Cache miss: fetching config {key}...") - return {"key": key, "value": "enabled"} - - -def main(): - print("=== Shared Metrics Collector Example ===\n") - - # Simulate cache operations - print("1. 
Cache operations:") - print(" get_user(1):", get_user(1)) # miss - print(" get_user(1):", get_user(1)) # hit - print(" get_user(2):", get_user(2)) # miss - - print("\n get_product(100):", get_product(100)) # miss - print(" get_product(100):", get_product(100)) # hit - print(" get_product(101):", get_product(101)) # miss - print(" get_product(101):", get_product(101)) # hit - - print("\n get_config('feature_x'):", get_config('feature_x')) # miss - print(" get_config('feature_x'):", get_config('feature_x')) # hit - - # Get aggregated stats - print("\n2. Aggregated metrics from single collector:") - stats = metrics.get_stats() - print(json.dumps(stats, indent=2)) - - # Show per-function breakdown - print("\n3. Per-function breakdown:") - for cache_name, cache_stats in stats.get("caches", {}).items(): - print(f"\n {cache_name}:") - print(f" - Hits: {cache_stats['hits']}") - print(f" - Misses: {cache_stats['misses']}") - print(f" - Hit rate: {cache_stats['hit_rate_percent']:.1f}%") - - -if __name__ == "__main__": - main() diff --git a/examples/writer_reader.py b/examples/writer_reader.py new file mode 100644 index 0000000..5d2f0d5 --- /dev/null +++ b/examples/writer_reader.py @@ -0,0 +1,59 @@ +""" +bg.write / bg.read — Single-Writer / Multi-Reader pattern. + +Simulates a background worker process writing to a shared store while +multiple reader "processes" (threads here) consume from their own local mirrors. 
+ +Run: + uv run python examples/writer_reader.py +""" + +from __future__ import annotations + +import time +from advanced_caching import bg, InMemCache, InMemoryMetrics + +# ── Shared store (use RedisCache in production for cross-process sharing) ──── +shared_store = InMemCache() +metrics = InMemoryMetrics() + + +# ── Writer (one per key per process) ───────────────────────────────────────── +@bg.write( + 0.1, key="exchange_rates", store=shared_store, metrics=metrics, run_immediately=True +) +def refresh_rates() -> dict: + rates = {"USD": 1.0, "EUR": 0.92, "GBP": 0.79, "ts": time.time()} + print(f" [writer] refreshed → {rates}") + return rates + + +# ── Readers (each gets its own private local mirror) ───────────────────────── +# store= is optional when writer is in same process — auto-discovers +get_rates_fast = bg.read("exchange_rates", interval=0.1) # auto-discover +get_rates_slow = bg.read("exchange_rates", interval=0.5, store=shared_store) + + +def main(): + print("\n=== Writer / Reader Pattern ===") + print("Writer refreshes every 100 ms; readers poll from private mirrors.\n") + + time.sleep(0.15) # let writer run at least once + + for i in range(4): + fast = get_rates_fast() + slow = get_rates_slow() + print(f" tick {i + 1}: fast_reader={fast} slow_reader={slow}") + time.sleep(0.12) + + # Metrics report + stats = metrics.get_stats() + bg_stats = stats.get("background_refresh", {}) + print(f"\n Writer refresh stats: {bg_stats}") + + bg.shutdown() + print(" bg scheduler stopped.") + + +if __name__ == "__main__": + main() diff --git a/pyproject.toml b/pyproject.toml index b9e9c29..2ae3afe 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,10 +1,10 @@ [build-system] -requires = ["hatchling>=1.25"] +requires = ["hatchling>=1.29.0"] build-backend = "hatchling.build" [project] name = "advanced-caching" -version = "0.3.0" +version = "1.0.0" description = "Production-ready composable caching with TTL, SWR, and background refresh patterns for Python." 
readme = "README.md" requires-python = ">=3.10" @@ -28,33 +28,33 @@ classifiers = [ "Typing :: Typed", ] dependencies = [ - "apscheduler>=3.10", - "orjson>=3.11.5", + "apscheduler>=3.11.2", + "orjson>=3.11.7", ] [project.optional-dependencies] -redis = ["redis>=5.0.0"] +redis = ["redis>=7.3.0"] dev = [ - "pytest>=8.2", - "pytest-cov>=4.0", + "pytest>=9.0.2", + "pytest-cov>=7.0.0", ] -tests = ["pytest", "pytest-asyncio", "pytest-cov"] -tests-s3 = ["moto[boto3]>=5.0.0"] -tests-gcs = ["google-cloud-storage>=2.10.0"] +tests = ["pytest>=9.0.2", "pytest-asyncio>=1.3.0", "pytest-cov>=7.0.0"] +tests-s3 = ["moto>=5.1.22"] +tests-gcs = ["google-cloud-storage>=3.9.0"] # Metrics exporters (optional) metrics = [] # Metapackage for core metrics (in-memory collector, no external dependencies) opentelemetry = [ - "opentelemetry-api>=1.39.1", - "opentelemetry-sdk>=1.39.1", + "opentelemetry-api>=1.40.0", + "opentelemetry-sdk>=1.40.0", ] -gcp-monitoring = ["google-cloud-monitoring>=2.28.0"] +gcp-monitoring = ["google-cloud-monitoring>=2.29.1"] # Convenience extra for all supported exporters all-metrics = [ - "opentelemetry-api>=1.20.0", - "opentelemetry-sdk>=1.20.0", - "google-cloud-monitoring>=2.15.0", + "opentelemetry-api>=1.40.0", + "opentelemetry-sdk>=1.40.0", + "google-cloud-monitoring>=2.29.1", ] [project.urls] @@ -65,12 +65,12 @@ Issues = "https://github.com/agkloop/advanced_caching/issues" [dependency-groups] dev = [ - "pytest>=8.2", + "pytest>=9.0.2", "pytest-asyncio>=1.3.0", - "pytest-cov>=4.0", - "ruff>=0.14.8", - "scalene>=1.5.55", - "testcontainers[redis]>=4.0.0", + "pytest-cov>=7.0.0", + "ruff>=0.15.5", + "scalene>=2.1.4", + "testcontainers[redis]>=4.14.1", ] [tool.pytest.ini_options] diff --git a/src/advanced_caching/__init__.py b/src/advanced_caching/__init__.py index ce4d63f..b51edce 100644 --- a/src/advanced_caching/__init__.py +++ b/src/advanced_caching/__init__.py @@ -1,12 +1,68 @@ -""" -Advanced caching primitives: TTL decorators, SWR cache, and background 
loaders. +"""advanced_caching — fast, clean, composable caching for Python. + +Quick start:: + + from advanced_caching import cache, bg + + @cache(60, key="user:{user_id}") + async def get_user(user_id: int) -> dict: + return await db.fetch(user_id) + + @cache(60, stale=30, key="feed:{}") + async def get_feed(user_id: int) -> list: + return await db.fetch_feed(user_id) + + @bg(300, key="app_config") + async def load_config() -> dict: + return await fetch_remote_config() + +Custom stores:: + + from advanced_caching import cache, RedisCache, ChainCache, InMemCache + from advanced_caching import serializers + import redis + + redis_store = RedisCache( + redis.from_url("redis://localhost"), + prefix="myapp:", + serializer=serializers.json, + ) + tiered = ChainCache.build(InMemCache(), redis_store, ttls=[60, 3600]) -Expose storage backends, decorators, and scheduler utilities under `advanced_caching`. + @cache(3600, key="prices:{symbol}", store=tiered) + async def get_price(symbol: str) -> float: ... + +Metrics:: + + from advanced_caching import cache, InMemoryMetrics + + metrics = InMemoryMetrics() + + @cache(60, key="user:{}", metrics=metrics) + async def get_user(user_id): ... + + print(metrics.get_stats()) """ -__version__ = "0.3.0" +__version__ = "1.0.0" +from ._cache import cache, bg +from . 
import serializers +from .serializers import ( + Serializer, + PickleSerializer, + JsonSerializer, + MsgpackSerializer, + protobuf, + pack_entry, + unpack_entry, +) +from .metrics import InMemoryMetrics from .storage import ( + CacheEntry, + CacheStorage, + InstrumentedStorage, + validate_cache_storage, InMemCache, RedisCache, HybridCache, @@ -14,22 +70,28 @@ LocalFileCache, S3Cache, GCSCache, - CacheEntry, - CacheStorage, - validate_cache_storage, - PickleSerializer, - JsonSerializer, -) -from .decorators import ( - TTLCache, - AsyncTTLCache, - SWRCache, - AsyncStaleWhileRevalidateCache, - BGCache, - AsyncBackgroundCache, ) __all__ = [ + # Decorators + "cache", + "bg", + # Serializers + "serializers", + "Serializer", + "PickleSerializer", + "JsonSerializer", + "MsgpackSerializer", + "protobuf", + "pack_entry", + "unpack_entry", + # Metrics + "InMemoryMetrics", + # Storage + "CacheEntry", + "CacheStorage", + "InstrumentedStorage", + "validate_cache_storage", "InMemCache", "RedisCache", "HybridCache", @@ -37,15 +99,4 @@ "LocalFileCache", "S3Cache", "GCSCache", - "CacheEntry", - "CacheStorage", - "validate_cache_storage", - "PickleSerializer", - "JsonSerializer", - "TTLCache", - "AsyncTTLCache", - "SWRCache", - "AsyncStaleWhileRevalidateCache", - "BGCache", - "AsyncBackgroundCache", ] diff --git a/src/advanced_caching/_cache.py b/src/advanced_caching/_cache.py new file mode 100644 index 0000000..46a9ea3 --- /dev/null +++ b/src/advanced_caching/_cache.py @@ -0,0 +1,951 @@ +"""Two public symbols: ``cache`` and ``bg``. + +Quick start:: + + from advanced_caching import cache, bg + + # TTL cache — same for sync and async + @cache(60, key="user:{user_id}") + async def get_user(user_id: int) -> User: + return await db.fetch(user_id) + + # Stale-while-revalidate (serve stale for 30 s, refresh in background) + @cache(60, stale=30, key="feed:{}") + async def get_feed(user_id): ... 
+ + # Background loader — auto-refreshed every 5 min + @bg(300, key="app_config") + async def load_config(): + return await fetch_remote_config() + + config = await load_config() # served from cache + + # Invalidation + get_user.invalidate(user_id=42) # delete one entry + get_user.clear() # wipe all entries +""" + +from __future__ import annotations + +import asyncio +import inspect +import logging +import time +from datetime import datetime, timedelta +from threading import Lock as _ThreadLock +from typing import Any, Callable, ClassVar, TypeVar + +from apscheduler.triggers.interval import IntervalTrigger + +from ._schedulers import SharedAsyncScheduler, SharedScheduler +from .metrics import MetricsCollector +from .storage import CacheEntry, CacheStorage, InMemCache, InstrumentedStorage + +F = TypeVar("F", bound=Callable[..., Any]) +T = TypeVar("T") + +logger = logging.getLogger(__name__) + + +# ────────────────────────────────────────────────────────────────────────────── +# Key generation +# ────────────────────────────────────────────────────────────────────────────── + + +def _make_key_fn( + key: str | Callable[..., str], + func: Callable[..., Any], +) -> Callable[..., str]: + """Build a fast cache-key function from a template string or callable.""" + if callable(key): + return key # type: ignore[return-value] + + template: str = key + + # Static key — no placeholders + if "{" not in template: + return lambda *a, **kw: template + + # Single positional placeholder "prefix:{}" — very common, optimise + if template.count("{}") == 1 and template.count("{") == 1: + prefix, suffix = template.split("{}", 1) + + def _pos(*args: Any, **kwargs: Any) -> str: + if args: + return f"{prefix}{args[0]}{suffix}" + if kwargs: + return f"{prefix}{next(iter(kwargs.values()))}{suffix}" + return template + + return _pos + + # Named / complex placeholders — inspect once at decoration time + sig = inspect.signature(func) + param_names = list(sig.parameters.keys()) + defaults = { + k: 
v.default + for k, v in sig.parameters.items() + if v.default is not inspect.Parameter.empty + } + + def _named(*args: Any, **kwargs: Any) -> str: + merged: dict[str, Any] = defaults.copy() if defaults else {} + if args: + merged.update(zip(param_names, args)) + if kwargs: + merged.update(kwargs) + try: + return template.format(**merged) + except (KeyError, ValueError, IndexError): + try: + return template.format(*args) + except Exception: + return template + except Exception: + return template + + return _named + + +# ────────────────────────────────────────────────────────────────────────────── +# Store normalisation +# ────────────────────────────────────────────────────────────────────────────── + + +def _resolve_store( + store: CacheStorage | Callable[[], CacheStorage] | None, +) -> CacheStorage: + """Accept a store instance, a factory callable, or None (→ InMemCache).""" + if store is None: + return InMemCache() + if callable(store) and not hasattr(store, "get"): + return store() # it's a factory / class + return store # type: ignore[return-value] + + +# ────────────────────────────────────────────────────────────────────────────── +# Wrapper metadata +# ────────────────────────────────────────────────────────────────────────────── + + +def _attach( + wrapper: Any, + func: Callable[..., Any], + *, + store: CacheStorage, + key_fn: Callable[..., str] | None = None, + static_key: str | None = None, +) -> None: + """Attach ``store``, ``invalidate``, ``clear``, and ``__wrapped__`` to wrapper.""" + wrapper.__wrapped__ = func + wrapper.__name__ = getattr(func, "__name__", "wrapper") + wrapper.__doc__ = func.__doc__ + wrapper.store = store + + if key_fn is not None: + _kf = key_fn + + def _inv(*args: Any, **kwargs: Any) -> None: + store.delete(_kf(*args, **kwargs)) + + wrapper.invalidate = _inv + elif static_key is not None: + _sk = static_key + + def _inv_static() -> None: # type: ignore[misc] + store.delete(_sk) + + wrapper.invalidate = _inv_static + + def _clear() 
-> None: + if hasattr(store, "clear"): + store.clear() # type: ignore[union-attr] + + wrapper.clear = _clear + + +# ────────────────────────────────────────────────────────────────────────────── +# @cache +# ────────────────────────────────────────────────────────────────────────────── + + +def cache( + ttl: int | float, + *, + key: str | Callable[..., str] = "{}", + stale: int | float = 0, + store: CacheStorage | Callable[[], CacheStorage] | None = None, + metrics: MetricsCollector | None = None, +) -> Callable[[F], F]: + """Cache decorator — works on sync **and** async functions. + + Args: + ttl: Time-to-live in seconds. ``0`` disables caching. + key: Key template (``"{user_id}"`` / ``"{}"`` / callable). + Defaults to ``"{}"`` — uses the first positional argument. + stale: Extra seconds to serve stale data while refreshing in background + (stale-while-revalidate pattern). ``0`` = pure TTL. + store: Cache backend. Pass an instance, a no-arg factory, or ``None`` + for a private :class:`~storage.InMemCache` per decorated function. + metrics: Optional :class:`~metrics.MetricsCollector`. + + The decorated function gains three attributes: + + * ``func.store`` — the :class:`~storage.utils.CacheStorage` instance. + * ``func.invalidate(*args, **kwargs)`` — delete a specific entry. + * ``func.clear()`` — wipe all entries in this cache. + + Examples:: + + @cache(60, key="user:{user_id}") + async def get_user(user_id: int) -> User: ... + + @cache(30, stale=60, key="prices:{}") + def get_prices(symbol: str) -> float: ... + + store = RedisCache(redis_client, prefix="myapp:") + + @cache(300, key="cfg", store=store) + async def get_config() -> dict: ... 
+ """ + + def decorator(func: F) -> F: + key_fn = _make_key_fn(key, func) + cache_obj = _resolve_store(store) + if metrics is not None: + cache_obj = InstrumentedStorage( + cache_obj, metrics, func.__name__, {"decorator": "cache"} + ) + + # Bind hot-path callables once at decoration time + cache_get = cache_obj.get + cache_set = cache_obj.set + get_entry = cache_obj.get_entry + set_entry = cache_obj.set_entry + set_if_not_exists = cache_obj.set_if_not_exists + now_fn = time.time + _stale = stale # local alias avoids closure-cell lookup + + if asyncio.iscoroutinefunction(func): + if _stale > 0: + # ── async SWR ──────────────────────────────────────────────── + async def async_wrapper(*args: Any, **kwargs: Any) -> Any: + if ttl <= 0: + return await func(*args, **kwargs) + + cache_key = key_fn(*args, **kwargs) + now = now_fn() + entry = get_entry(cache_key, now) + + if entry is not None: + if now < entry.fresh_until: + return entry.value + if (now - entry.created_at) <= (ttl + _stale): + lock_key = f"{cache_key}:r" + if set_if_not_exists(lock_key, "1", _stale or 10): + + async def _bg_refresh() -> None: + try: + new_val = await func(*args, **kwargs) + t = now_fn() + set_entry( + cache_key, + CacheEntry( + value=new_val, + fresh_until=t + ttl, + created_at=t, + ), + ) + except Exception: + logger.exception( + "SWR refresh failed for %r", cache_key + ) + + asyncio.create_task(_bg_refresh()) + return entry.value + + result = await func(*args, **kwargs) + t = now_fn() + set_entry( + cache_key, + CacheEntry(value=result, fresh_until=t + ttl, created_at=t), + ) + return result + + else: + # ── async TTL-only (no SWR) — use cache_get for one fewer time.time() ── + async def async_wrapper(*args: Any, **kwargs: Any) -> Any: # type: ignore[misc] + if ttl <= 0: + return await func(*args, **kwargs) + + cache_key = key_fn(*args, **kwargs) + value = cache_get(cache_key) + if value is not None: + return value + + result = await func(*args, **kwargs) + cache_set(cache_key, result, 
ttl) + return result + + _attach(async_wrapper, func, store=cache_obj, key_fn=key_fn) + return async_wrapper # type: ignore[return-value] + + # ── sync ────────────────────────────────────────────────────────────── + + if _stale > 0: + # ── sync SWR ───────────────────────────────────────────────────── + def sync_wrapper(*args: Any, **kwargs: Any) -> Any: + if ttl <= 0: + return func(*args, **kwargs) + + cache_key = key_fn(*args, **kwargs) + now = now_fn() + entry = get_entry(cache_key, now) + + if entry is not None: + if now < entry.fresh_until: + return entry.value + + if (now - entry.created_at) <= (ttl + _stale): + lock_key = f"{cache_key}:r" + if set_if_not_exists(lock_key, "1", _stale or 10): + + def _bg_refresh_sync() -> None: + try: + new_val = func(*args, **kwargs) + t = now_fn() + set_entry( + cache_key, + CacheEntry( + value=new_val, + fresh_until=t + ttl, + created_at=t, + ), + ) + except Exception: + logger.exception( + "SWR sync refresh failed for %r", cache_key + ) + + _sched = SharedScheduler.get_scheduler() + SharedScheduler.start() + _sched.add_job(_bg_refresh_sync) + return entry.value + + result = func(*args, **kwargs) + t = now_fn() + set_entry( + cache_key, + CacheEntry(value=result, fresh_until=t + ttl, created_at=t), + ) + return result + + else: + # ── sync TTL-only — skip get_entry(); use cache_get (one time.time() call) ── + def sync_wrapper(*args: Any, **kwargs: Any) -> Any: # type: ignore[misc] + if ttl <= 0: + return func(*args, **kwargs) + + cache_key = key_fn(*args, **kwargs) + value = cache_get(cache_key) + if value is not None: + return value + + result = func(*args, **kwargs) + cache_set(cache_key, result, ttl) + return result + + _attach(sync_wrapper, func, store=cache_obj, key_fn=key_fn) + return sync_wrapper # type: ignore[return-value] + + return decorator # type: ignore[return-value] + + +# ────────────────────────────────────────────────────────────────────────────── +# @bg — background loader +# 
────────────────────────────────────────────────────────────────────────────── + + +class bg: + """Background-refresh cache decorator and factory. + + **Loader** — decorate a zero-argument function; it will run on a fixed + schedule and serve cached results on every call:: + + @bg(300, key="config") + async def load_config(): + return await fetch_config() + + cfg = await load_config() # returns cached, refreshes in background + + **Writer / Reader** — useful when a single process writes to a shared + store (e.g. Redis) and many workers read from it locally:: + + @bg.write(60, key="prices") + async def refresh_prices(): + return await fetch_prices() + + get_prices = bg.read("prices", interval=60, store=redis_store) + prices = get_prices() + + **Shutdown**:: + + bg.shutdown() + """ + + _writer_registry: ClassVar[dict[str, Any]] = {} + + def __init__( + self, + interval: int, + *, + key: str, + ttl: int | float | None = None, + store: CacheStorage | Callable[[], CacheStorage] | None = None, + metrics: MetricsCollector | None = None, + run_immediately: bool = True, + on_error: Callable[[Exception], None] | None = None, + ) -> None: + self._interval = interval + self._key = key + self._ttl = ttl + self._store = store + self._metrics = metrics + self._run_immediately = run_immediately + self._on_error = on_error + + def __call__(self, func: Callable[[], T]) -> Callable[[], T]: + return _register_loader( + func, + key=self._key, + interval=self._interval, + ttl=self._ttl, + store=self._store, + metrics=self._metrics, + run_immediately=self._run_immediately, + on_error=self._on_error, + ) + + @classmethod + def write( + cls, + interval: int, + *, + key: str, + ttl: int | float | None = None, + store: CacheStorage | Callable[[], CacheStorage] | None = None, + metrics: MetricsCollector | None = None, + on_error: Callable[[Exception], None] | None = None, + run_immediately: bool = True, + ) -> Callable[[Callable[[], T]], Callable[[], T]]: + """Register a single writer for a 
shared cache key. + + Enforces that only one writer exists per key across the process. + Readers created with :meth:`read` and the same *key* will automatically + use this writer's store when *store* is omitted. + """ + + def decorator(func: Callable[[], T]) -> Callable[[], T]: + return _register_writer( + func, key, interval, ttl, store, metrics, on_error, run_immediately + ) + + return decorator + + @classmethod + def read( + cls, + key: str, + *, + interval: int = 0, + ttl: int | float | None = None, + store: CacheStorage | Callable[[], CacheStorage] | None = None, + metrics: MetricsCollector | None = None, + on_error: Callable[[Exception], None] | None = None, + run_immediately: bool = True, + ) -> Callable[[], T | None]: + """Create a read-only consumer that pulls from a shared cache. + + The reader keeps a fast local :class:`InMemCache` copy and syncs it + from *store* on a fixed schedule. + + If *store* is ``None`` **and** a writer was registered for *key* via + :meth:`write`, the writer's store is used automatically — you never + need to pass the store twice. + + Multiple readers for the same key each get an independent scheduler + job so they can run on different intervals without interfering. 
+ """ + return _get_reader( + key, + interval=interval, + ttl=ttl, + store=store, + metrics=metrics, + on_error=on_error, + run_immediately=run_immediately, + ) + + @classmethod + def shutdown(cls, wait: bool = True) -> None: + """Stop all background schedulers and clear the writer registry.""" + SharedAsyncScheduler.shutdown(wait) + SharedScheduler.shutdown(wait) + cls._writer_registry.clear() + + +# ────────────────────────────────────────────────────────────────────────────── +# Internal helpers — bg internals +# ────────────────────────────────────────────────────────────────────────────── + + +def _resolve_bg_ttl(ttl: int | float | None, interval: int) -> int | float: + if interval <= 0: + return ttl or 0 + return ttl if ttl is not None else interval * 2 + + +def _register_loader( + func: Callable[[], T], + *, + key: str, + interval: int, + ttl: int | float | None, + store: CacheStorage | Callable[[], CacheStorage] | None, + metrics: MetricsCollector | None, + run_immediately: bool, + on_error: Callable[[Exception], None] | None, +) -> Callable[[], T]: + cache_key = key + if interval <= 0: + interval = 0 + effective_ttl: int | float = _resolve_bg_ttl(ttl, interval) + + cache_obj = _resolve_store(store) + if metrics is not None: + cache_obj = InstrumentedStorage( + cache_obj, metrics, cache_key, {"decorator": "bg"} + ) + + cache_get = cache_obj.get + cache_set = cache_obj.set + + def _handle_error(e: Exception) -> None: + if on_error: + try: + on_error(e) + except Exception: + logger.exception("bg on_error handler raised for key %r", cache_key) + else: + logger.exception("bg refresh failed for key %r", cache_key) + + if asyncio.iscoroutinefunction(func): + loader_lock: asyncio.Lock | None = None + initial_load_done = False + initial_load_task: asyncio.Task[None] | None = None + + if interval <= 0 or effective_ttl <= 0: + + async def async_passthrough() -> T: + return await func() + + _attach(async_passthrough, func, store=cache_obj, static_key=cache_key) + 
return async_passthrough # type: ignore[return-value] + + async def async_refresh() -> None: + t0 = time.monotonic() + try: + data = await func() + cache_set(cache_key, data, effective_ttl) + if metrics is not None: + metrics.record_background_refresh( + cache_key, + success=True, + duration_seconds=time.monotonic() - t0, + ) + except Exception as e: + if metrics is not None: + metrics.record_background_refresh( + cache_key, + success=False, + duration_seconds=time.monotonic() - t0, + ) + _handle_error(e) + + next_run_time: datetime | None = None + if run_immediately and cache_get(cache_key) is None: + try: + loop = asyncio.get_running_loop() + initial_load_task = loop.create_task(async_refresh()) + next_run_time = datetime.now() + timedelta(seconds=interval * 2) + except RuntimeError: + asyncio.run(async_refresh()) + initial_load_done = True + next_run_time = datetime.now() + timedelta(seconds=interval * 2) + + sched = SharedAsyncScheduler.get_scheduler() + SharedAsyncScheduler.ensure_started() + sched.add_job( + async_refresh, + trigger=IntervalTrigger(seconds=interval), + id=cache_key, + replace_existing=True, + next_run_time=next_run_time, + ) + + async def async_wrapper() -> T: + nonlocal loader_lock, initial_load_done, initial_load_task + value = cache_get(cache_key) + if value is not None: + return value + if loader_lock is None: + loader_lock = asyncio.Lock() + async with loader_lock: + value = cache_get(cache_key) + if value is not None: + return value + if not initial_load_done: + if initial_load_task is not None: + await initial_load_task + elif not run_immediately: + await async_refresh() + initial_load_done = True + value = cache_get(cache_key) + if value is not None: + return value + result = await func() + cache_set(cache_key, result, effective_ttl) + return result + + _attach(async_wrapper, func, store=cache_obj, static_key=cache_key) + return async_wrapper # type: ignore[return-value] + + # ── sync 
────────────────────────────────────────────────────────────────── + + sync_lock = _ThreadLock() + sync_initial_load_done = False + + if interval <= 0 or effective_ttl <= 0: + + def sync_passthrough() -> T: + return func() + + _attach(sync_passthrough, func, store=cache_obj, static_key=cache_key) + return sync_passthrough + + def sync_refresh() -> None: + t0 = time.monotonic() + try: + data = func() + cache_set(cache_key, data, effective_ttl) + if metrics is not None: + metrics.record_background_refresh( + cache_key, + success=True, + duration_seconds=time.monotonic() - t0, + ) + except Exception as e: + if metrics is not None: + metrics.record_background_refresh( + cache_key, + success=False, + duration_seconds=time.monotonic() - t0, + ) + _handle_error(e) + + next_run_time_sync: datetime | None = None + if run_immediately and cache_get(cache_key) is None: + sync_refresh() + sync_initial_load_done = True + next_run_time_sync = datetime.now() + timedelta(seconds=interval * 2) + + sched_sync = SharedScheduler.get_scheduler() + SharedScheduler.start() + sched_sync.add_job( + sync_refresh, + trigger=IntervalTrigger(seconds=interval), + id=cache_key, + replace_existing=True, + next_run_time=next_run_time_sync, + ) + + def sync_wrapper() -> T: + nonlocal sync_initial_load_done + value = cache_get(cache_key) + if value is not None: + return value + with sync_lock: + value = cache_get(cache_key) + if value is not None: + return value + if not sync_initial_load_done: + if not run_immediately: + sync_refresh() + sync_initial_load_done = True + value = cache_get(cache_key) + if value is not None: + return value + result = func() + cache_set(cache_key, result, effective_ttl) + return result + + _attach(sync_wrapper, func, store=cache_obj, static_key=cache_key) + return sync_wrapper + + +def _register_writer( + func: Callable[[], T], + key: str, + interval: int, + ttl: int | float | None, + store: CacheStorage | Callable[[], CacheStorage] | None, + metrics: MetricsCollector | 
None, + on_error: Callable[[Exception], None] | None, + run_immediately: bool, +) -> Callable[[], T]: + cache_key = key + if cache_key in bg._writer_registry: + raise ValueError(f"bg writer already registered for key '{cache_key}'") + + if interval <= 0: + interval = 0 + effective_ttl: int | float = _resolve_bg_ttl(ttl, interval) + + cache_obj = _resolve_store(store) + if metrics is not None: + cache_obj = InstrumentedStorage( + cache_obj, metrics, cache_key, {"decorator": "bg.write"} + ) + cache_get = cache_obj.get + cache_set = cache_obj.set + + def _handle_error(e: Exception) -> None: + if on_error: + try: + on_error(e) + except Exception: + logger.exception("bg.write on_error raised for key %r", cache_key) + else: + logger.exception("bg.write failed for key %r", cache_key) + + if asyncio.iscoroutinefunction(func): + # Init lock at registration time — never lazily inside the coroutine + _alock = asyncio.Lock() + + async def _run_once_async() -> T: + async with _alock: + try: + data = await func() + cache_set(cache_key, data, effective_ttl) + return data + except Exception as e: + _handle_error(e) + raise + + async def _async_refresh() -> None: + t0 = time.monotonic() + try: + await _run_once_async() + if metrics is not None: + metrics.record_background_refresh( + cache_key, + success=True, + duration_seconds=time.monotonic() - t0, + ) + except Exception: + if metrics is not None: + metrics.record_background_refresh( + cache_key, + success=False, + duration_seconds=time.monotonic() - t0, + ) + + next_run_time: datetime | None = None + if run_immediately and cache_get(cache_key) is None: + try: + loop = asyncio.get_running_loop() + loop.create_task(_async_refresh()) + next_run_time = datetime.now() + timedelta(seconds=interval * 2) + except RuntimeError: + asyncio.run(_async_refresh()) + next_run_time = datetime.now() + timedelta(seconds=interval * 2) + + if interval > 0: + sched = SharedAsyncScheduler.get_scheduler() + SharedAsyncScheduler.ensure_started() + 
sched.add_job( + _async_refresh, + trigger=IntervalTrigger(seconds=interval), + id=cache_key, + replace_existing=True, + next_run_time=next_run_time, + ) + + async def writer_async() -> T: + value = cache_get(cache_key) + if value is not None: + return value # type: ignore[return-value] + return await _run_once_async() + + _attach(writer_async, func, store=cache_obj, static_key=cache_key) + bg._writer_registry[cache_key] = { + "cache": cache_obj, + "ttl": effective_ttl, + "wrapper": writer_async, + "is_async": True, + } + return writer_async # type: ignore[return-value] + + # ── sync writer ─────────────────────────────────────────────────────────── + + _slock = _ThreadLock() + + def _run_once_sync() -> T: + with _slock: + try: + data = func() + cache_set(cache_key, data, effective_ttl) + return data + except Exception as e: + _handle_error(e) + raise + + def _sync_refresh() -> None: + t0 = time.monotonic() + try: + _run_once_sync() + if metrics is not None: + metrics.record_background_refresh( + cache_key, + success=True, + duration_seconds=time.monotonic() - t0, + ) + except Exception: + if metrics is not None: + metrics.record_background_refresh( + cache_key, + success=False, + duration_seconds=time.monotonic() - t0, + ) + + next_run_time_sync: datetime | None = None + if run_immediately and cache_get(cache_key) is None: + _sync_refresh() + next_run_time_sync = datetime.now() + timedelta(seconds=interval * 2) + + if interval > 0: + sched_s = SharedScheduler.get_scheduler() + SharedScheduler.start() + sched_s.add_job( + _sync_refresh, + trigger=IntervalTrigger(seconds=interval), + id=cache_key, + replace_existing=True, + next_run_time=next_run_time_sync, + ) + + def writer_sync() -> T: + value = cache_get(cache_key) + if value is not None: + return value # type: ignore[return-value] + return _run_once_sync() + + _attach(writer_sync, func, store=cache_obj, static_key=cache_key) + bg._writer_registry[cache_key] = { + "cache": cache_obj, + "ttl": effective_ttl, + 
"wrapper": writer_sync, + "is_async": False, + } + return writer_sync + + +# Reader instance counter — makes every reader's scheduler job ID unique +_reader_counter = 0 + + +def _get_reader( + key: str, + *, + interval: int, + ttl: int | float | None, + store: CacheStorage | Callable[[], CacheStorage] | None, + metrics: MetricsCollector | None, + on_error: Callable[[Exception], None] | None, + run_immediately: bool = True, +) -> Callable[[], T | None]: + global _reader_counter + _reader_counter += 1 + reader_id = _reader_counter + + cache_key = key + + # ── Auto-discover writer's store when store=None ────────────────────────── + if store is None: + writer_entry = bg._writer_registry.get(cache_key) + if writer_entry is not None: + source_cache: CacheStorage = writer_entry["cache"] + if ttl is None: + ttl = writer_entry["ttl"] + else: + logger.debug( + "bg.read(%r): no writer registered and store=None — reader will always return None", + cache_key, + ) + source_cache = InMemCache() + else: + source_cache = _resolve_store(store) + + if interval <= 0: + interval = 0 + effective_ttl: int | float = _resolve_bg_ttl(ttl, interval) + + # Local in-memory mirror — hot path never touches the source store + local_cache = InMemCache() + source_get = source_cache.get + local_get = local_cache.get + local_set = local_cache.set + + def _handle_error(e: Exception) -> None: + if on_error: + try: + on_error(e) + except Exception: + logger.exception("bg.read on_error raised for key %r", cache_key) + else: + logger.exception("bg.read refresh failed for key %r", cache_key) + + def _load_once() -> None: + try: + value = source_get(cache_key) + if value is not None: + local_set(cache_key, value, effective_ttl) + except Exception as e: + _handle_error(e) + + if run_immediately and (effective_ttl > 0 or interval > 0): + _load_once() + + if interval > 0: + sched = SharedScheduler.get_scheduler() + SharedScheduler.start() + # Unique job ID per reader instance — prevents multiple readers from 
+ # silently overwriting each other's scheduler job. + sched.add_job( + _load_once, + trigger=IntervalTrigger(seconds=interval), + id=f"reader:{cache_key}:{reader_id}", + replace_existing=True, + ) + + def reader() -> T | None: + # Hot path: local InMemCache hit (lock-free, ~0.18 µs) + value = local_get(cache_key) + if value is not None: + return value + # Cold path: pull from source on first call or after TTL expiry + _load_once() + return local_get(cache_key) + + _attach(reader, reader, store=local_cache, static_key=cache_key) + return reader # type: ignore[return-value] diff --git a/src/advanced_caching/_decorator_common.py b/src/advanced_caching/_decorator_common.py deleted file mode 100644 index 47fc2a4..0000000 --- a/src/advanced_caching/_decorator_common.py +++ /dev/null @@ -1,66 +0,0 @@ -"""Internal helpers shared by caching decorators. - -This module is intentionally *not* part of the public API. - -Goals: -- Eliminate repeated cache-backend normalization patterns. -- Keep decorator hot paths small by binding frequently-used attributes once. -- Centralize wrapper metadata used by tests/debugging (`__wrapped__`, `_cache`, etc.). -""" - -from __future__ import annotations -from typing import Callable, TypeVar - -from .storage import CacheStorage, InMemCache - -T = TypeVar("T") - - -def normalize_cache_factory( - cache: CacheStorage | Callable[[], CacheStorage] | None, - *, - default_factory: Callable[[], CacheStorage] = InMemCache, -) -> Callable[[], CacheStorage]: - """Normalize a cache backend parameter into a no-arg factory. - - Accepted forms: - - None: use default_factory - - Callable[[], CacheStorage]: use as-is - - CacheStorage instance: wrap into a factory that returns the instance - - This keeps decorator code paths small and consistent. 
- """ - - if cache is None: - return default_factory - if callable(cache): - return cache # type: ignore[return-value] - - cache_instance = cache - - def factory() -> CacheStorage: - return cache_instance - - return factory - - -def attach_wrapper_metadata( - wrapper: Callable[..., T], - func: Callable[..., T], - *, - cache_obj: CacheStorage, - cache_key: str | None = None, -) -> None: - """Attach metadata fields used for debugging/tests. - - Notes: - - We intentionally avoid functools.wraps() here to keep decoration overhead - minimal and to preserve existing behavior. - """ - - wrapper.__wrapped__ = func # type: ignore[attr-defined] - wrapper.__name__ = func.__name__ # type: ignore[attr-defined] - wrapper.__doc__ = func.__doc__ # type: ignore[attr-defined] - wrapper._cache = cache_obj # type: ignore[attr-defined] - if cache_key is not None: - wrapper._cache_key = cache_key # type: ignore[attr-defined] diff --git a/src/advanced_caching/_schedulers.py b/src/advanced_caching/_schedulers.py index 46f2914..84e4931 100644 --- a/src/advanced_caching/_schedulers.py +++ b/src/advanced_caching/_schedulers.py @@ -30,6 +30,9 @@ def get_scheduler(cls) -> BackgroundScheduler: @classmethod def start(cls) -> None: + # Fast path: skip lock acquisition when already started. + if cls._started: + return with cls._lock: if not cls._started: cls.get_scheduler().start() @@ -66,6 +69,9 @@ def get_scheduler(cls) -> AsyncIOScheduler: @classmethod def ensure_started(cls) -> None: + # Fast path: skip lock acquisition when already started. + if cls._started: + return with cls._lock: if not cls._started: cls.get_scheduler().start() diff --git a/src/advanced_caching/decorators.py b/src/advanced_caching/decorators.py deleted file mode 100644 index 1e96b59..0000000 --- a/src/advanced_caching/decorators.py +++ /dev/null @@ -1,1063 +0,0 @@ -""" -Cache decorators for function result caching. 
- -Provides: -- TTLCache: Simple TTL-based caching -- SWRCache: Stale-while-revalidate pattern -- BGCache: Background scheduler-based loading with APScheduler -""" - -from __future__ import annotations - -import asyncio -import inspect -import logging -import time -from datetime import datetime, timedelta -from typing import Callable, TypeVar, Any -from dataclasses import dataclass - -from apscheduler.triggers.interval import IntervalTrigger - -from ._decorator_common import attach_wrapper_metadata, normalize_cache_factory -from ._schedulers import SharedAsyncScheduler, SharedScheduler -from .metrics import MetricsCollector, NULL_METRICS -from .storage import CacheEntry, CacheStorage, InMemCache, InstrumentedStorage - -T = TypeVar("T") - -# Minimal logger used only for error reporting (no debug/info on hot paths) -logger = logging.getLogger(__name__) - - -# Helper to normalize cache key builders for all decorators. -def _create_smart_key_fn( - key: str | Callable[..., str], func: Callable[..., Any] -) -> Callable[..., str]: - # If the key is already a function (e.g., lambda u: f"user:{u}"), return it directly. - if callable(key): - return key # type: ignore[assignment] - - template = key - # Optimization: Static key (e.g., "global_config") - # If there are no placeholders, we don't need to format anything. - if "{" not in template: - - def key_fn(*args, **kwargs) -> str: - # Always return the static string, ignoring arguments. - return template - - return key_fn - - # Optimization: Simple positional key "prefix:{}" (e.g., "user:{}") - # This is a very common pattern, so we optimize it to avoid full string formatting. - if template.count("{}") == 1 and template.count("{") == 1: - prefix, suffix = template.split("{}", 1) - - def key_fn(*args, **kwargs) -> str: - # If positional args are provided (e.g., get_user(123)), use the first one. 
- if args: - return f"{prefix}{args[0]}{suffix}" - # If keyword args are provided (e.g., get_user(user_id=123)), use the first value. - # This supports the case where a positional placeholder is used but the function is called with kwargs. - if kwargs: - # Fallback for single kwarg usage with positional template - return f"{prefix}{next(iter(kwargs.values()))}{suffix}" - # If no arguments are provided, return the raw template (e.g., "user:{}"). - return template - - return key_fn - - # General case: Named placeholders (e.g., "user:{id}") or complex positional (e.g., "{}:{}" or "{0}") - # We need to inspect the function signature to map positional arguments to parameter names. - sig = inspect.signature(func) - param_names = list(sig.parameters.keys()) - - # Pre-compute defaults to handle cases where arguments are omitted but have default values. - # e.g., def func(a=1): ... with key="{a}" - defaults = { - k: v.default - for k, v in sig.parameters.items() - if v.default is not inspect.Parameter.empty - } - - def key_fn(*args, **kwargs) -> str: - # Fast merge of arguments to support named placeholders. - # 1. Start with defaults (e.g., {'a': 1}) - merged = defaults.copy() if defaults else {} - - # 2. Map positional args to names (e.g., func(2) -> {'a': 2}) - # This allows us to use named placeholders even when the function is called positionally. - if args: - merged.update(zip(param_names, args)) - - # 3. Update with explicit kwargs (e.g., func(a=3) -> {'a': 3}) - if kwargs: - merged.update(kwargs) - - try: - # Try formatting with named arguments (e.g., "user:{id}".format(id=123)) - return template.format(**merged) - except (KeyError, ValueError, IndexError): - # Fallback: Try raw positional args (for "{}" templates or mixed usage) - # e.g., "user:{}".format(123) if named formatting failed. - try: - return template.format(*args) - except Exception: - # If formatting fails entirely, return the raw template to avoid crashing. 
- return template - except Exception: - # Catch-all for other formatting errors. - return template - - return key_fn - - -# ============================================================================ -# TTLCache - Simple TTL-based caching decorator -# ============================================================================ - - -class AsyncTTLCache: - """ - Simple TTL cache decorator (singleton pattern). - Each decorated function gets its own cache instance. - Supports both sync and async functions (preserves sync/async nature). - - Key templates (high-performance, simple): - - Positional placeholder: "user:{}" → first positional arg - - Named placeholder: "user:{user_id}" → keyword arg `user_id` - - Custom function: key=lambda *a, **k: ... - - Examples: - @TTLCache.cached("user:{}", ttl=60) - async def get_user(user_id): - return await db.fetch_user(user_id) - """ - - @classmethod - def configure( - cls, cache: CacheStorage | Callable[[], CacheStorage] - ) -> type[AsyncTTLCache]: - """ - Create a configured version of TTLCache with a default cache backend. - - Example: - MyCache = TTLCache.configure(cache=RedisCache(...)) - @MyCache.cached("key", ttl=60) - def func(): ... - """ - - class ConfiguredTTLCache(cls): - @classmethod - def cached( - cls_inner, - key: str | Callable[..., str], - ttl: int, - cache: CacheStorage | Callable[[], CacheStorage] | None = None, - ) -> Callable[[Callable[..., T]], Callable[..., T]]: - # Use the configured cache if none is provided - return cls.cached(key, ttl, cache=cache or cls_inner._configured_cache) - - ConfiguredTTLCache._configured_cache = cache # type: ignore - return ConfiguredTTLCache - - @classmethod - def cached( - cls, - key: str | Callable[..., str], - ttl: int, - cache: CacheStorage | Callable[[], CacheStorage] | None = None, - metrics: MetricsCollector | None = None, - ) -> Callable[[Callable[..., T]], Callable[..., T]]: - """ - Cache decorator with TTL. 
- - Args: - key: Cache key template (e.g., "user:{}") or generator function - ttl: Time-to-live in seconds - cache: Optional cache backend (defaults to InMemCache) - metrics: Optional metrics collector for instrumentation - """ - cache_factory = normalize_cache_factory(cache, default_factory=InMemCache) - - def decorator(func: Callable[..., T]) -> Callable[..., T]: - key_fn = _create_smart_key_fn(key, func) - cache_obj = cache_factory() - - # Wrap cache with instrumentation if metrics are provided - if metrics is not None: - cache_name = func.__name__ - cache_obj = InstrumentedStorage( - cache_obj, metrics, cache_name, {"decorator": "TTLCache"} - ) - - cache_get_entry = cache_obj.get_entry - cache_set = cache_obj.set - now_fn = time.time - - if asyncio.iscoroutinefunction(func): - - async def async_wrapper(*args, **kwargs) -> T: - if ttl <= 0: - return await func(*args, **kwargs) - - cache_key = key_fn(*args, **kwargs) - entry = cache_get_entry(cache_key) - if entry is not None: - if now_fn() < entry.fresh_until: - return entry.value - - result = await func(*args, **kwargs) - cache_set(cache_key, result, ttl) - return result - - attach_wrapper_metadata(async_wrapper, func, cache_obj=cache_obj) - return async_wrapper # type: ignore - - # Sync wrapper - def sync_wrapper(*args, **kwargs) -> T: - if ttl <= 0: - return func(*args, **kwargs) - - cache_key = key_fn(*args, **kwargs) - entry = cache_get_entry(cache_key) - if entry is not None: - if now_fn() < entry.fresh_until: - return entry.value - - result = func(*args, **kwargs) - cache_set(cache_key, result, ttl) - return result - - attach_wrapper_metadata(sync_wrapper, func, cache_obj=cache_obj) - return sync_wrapper - - return decorator - - -# Alias for easier import -TTLCache = AsyncTTLCache - - -# ============================================================================ -# SWRCache - Stale-While-Revalidate pattern -# ============================================================================ - - -class 
AsyncStaleWhileRevalidateCache: - """ - SWR (Stale-While-Revalidate) cache decorator. - Supports both sync and async functions. - """ - - @classmethod - def configure( - cls, cache: CacheStorage | Callable[[], CacheStorage] - ) -> type[AsyncStaleWhileRevalidateCache]: - """ - Create a configured version of SWRCache with a default cache backend. - """ - - class ConfiguredSWRCache(cls): - @classmethod - def cached( - cls_inner, - key: str | Callable[..., str], - ttl: int, - stale_ttl: int = 0, - cache: CacheStorage | Callable[[], CacheStorage] | None = None, - enable_lock: bool = True, - ) -> Callable[[Callable[..., T]], Callable[..., T]]: - return cls.cached( - key, - ttl, - stale_ttl=stale_ttl, - cache=cache or cls_inner._configured_cache, - enable_lock=enable_lock, - ) - - ConfiguredSWRCache._configured_cache = cache # type: ignore - return ConfiguredSWRCache - - @classmethod - def cached( - cls, - key: str | Callable[..., str], - ttl: int, - stale_ttl: int = 0, - cache: CacheStorage | Callable[[], CacheStorage] | None = None, - enable_lock: bool = True, - metrics: MetricsCollector | None = None, - ) -> Callable[[Callable[..., T]], Callable[..., T]]: - """ - SWR cache decorator. 
- - Args: - key: Cache key template or generator function - ttl: Fresh time in seconds - stale_ttl: Additional stale time in seconds (0 = no stale period) - cache: Optional cache backend (defaults to InMemCache) - enable_lock: Whether to use locking for refresh coordination - metrics: Optional metrics collector for instrumentation - """ - cache_factory = normalize_cache_factory(cache, default_factory=InMemCache) - - def decorator(func: Callable[..., T]) -> Callable[..., T]: - key_fn = _create_smart_key_fn(key, func) - cache_obj = cache_factory() - - # Wrap cache with instrumentation if metrics are provided - if metrics is not None: - cache_name = func.__name__ - cache_obj = InstrumentedStorage( - cache_obj, metrics, cache_name, {"decorator": "SWRCache"} - ) - - get_entry = cache_obj.get_entry - set_entry = cache_obj.set_entry - set_if_not_exists = cache_obj.set_if_not_exists - now_fn = time.time - - if asyncio.iscoroutinefunction(func): - create_task = asyncio.create_task - - async def async_wrapper(*args, **kwargs) -> T: - if ttl <= 0: - return await func(*args, **kwargs) - cache_key = key_fn(*args, **kwargs) - now = now_fn() - entry = get_entry(cache_key) - - if entry is None: - result = await func(*args, **kwargs) - created_at = now_fn() - set_entry( - cache_key, - CacheEntry( - value=result, - fresh_until=created_at + ttl, - created_at=created_at, - ), - ) - return result - - if now < entry.fresh_until: - return entry.value - - if (now - entry.created_at) > (ttl + stale_ttl): - result = await func(*args, **kwargs) - created_at = now_fn() - set_entry( - cache_key, - CacheEntry( - value=result, - fresh_until=created_at + ttl, - created_at=created_at, - ), - ) - return result - - if enable_lock: - lock_key = f"{cache_key}:refresh_lock" - if not set_if_not_exists(lock_key, "1", stale_ttl or 10): - return entry.value - - async def refresh_job() -> None: - refresh_start = time.perf_counter() - success = False - try: - new_value = await func(*args, **kwargs) - 
refreshed_at = now_fn() - set_entry( - cache_key, - CacheEntry( - value=new_value, - fresh_until=refreshed_at + ttl, - created_at=refreshed_at, - ), - ) - success = True - except Exception: - logger.exception( - "Async SWR background refresh failed for key %r", - cache_key, - ) - finally: - if metrics is not None: - refresh_duration = time.perf_counter() - refresh_start - metrics.record_background_refresh( - func.__name__, - success, - refresh_duration, - {"decorator": "SWRCache", "key": cache_key}, - ) - - create_task(refresh_job()) - return entry.value - - attach_wrapper_metadata(async_wrapper, func, cache_obj=cache_obj) - return async_wrapper # type: ignore - - # Sync wrapper - def sync_wrapper(*args, **kwargs) -> T: - if ttl <= 0: - return func(*args, **kwargs) - cache_key = key_fn(*args, **kwargs) - now = now_fn() - entry = get_entry(cache_key) - - if entry is None: - result = func(*args, **kwargs) - created_at = now_fn() - set_entry( - cache_key, - CacheEntry( - value=result, - fresh_until=created_at + ttl, - created_at=created_at, - ), - ) - return result - - if now < entry.fresh_until: - return entry.value - - if (now - entry.created_at) > (ttl + stale_ttl): - result = func(*args, **kwargs) - created_at = now_fn() - set_entry( - cache_key, - CacheEntry( - value=result, - fresh_until=created_at + ttl, - created_at=created_at, - ), - ) - return result - - if enable_lock: - lock_key = f"{cache_key}:refresh_lock" - if not set_if_not_exists(lock_key, "1", stale_ttl or 10): - return entry.value - - def refresh_job() -> None: - refresh_start = time.perf_counter() - success = False - try: - new_value = func(*args, **kwargs) - refreshed_at = now_fn() - set_entry( - cache_key, - CacheEntry( - value=new_value, - fresh_until=refreshed_at + ttl, - created_at=refreshed_at, - ), - ) - success = True - except Exception: - logger.exception( - "Sync SWR background refresh failed for key %r", cache_key - ) - finally: - if metrics is not None: - refresh_duration = 
time.perf_counter() - refresh_start - metrics.record_background_refresh( - func.__name__, - success, - refresh_duration, - {"decorator": "SWRCache", "key": cache_key}, - ) - - # Run refresh in background using SharedScheduler - scheduler = SharedScheduler.get_scheduler() - SharedScheduler.start() - scheduler.add_job(refresh_job) - return entry.value - - attach_wrapper_metadata(sync_wrapper, func, cache_obj=cache_obj) - return sync_wrapper - - return decorator - - -SWRCache = AsyncStaleWhileRevalidateCache - - -# Schedulers are implemented as internal singletons in `advanced_caching._schedulers`. - - -# ============================================================================ -# BGCache - Background cache loader decorator -# ============================================================================ - - -class AsyncBackgroundCache: - """Background cache loader that uses APScheduler (AsyncIOScheduler for async, BackgroundScheduler for sync).""" - - # Global registry to enforce single writer per cache key across all configured BGCache classes. - _writer_registry: dict[str, "_WriterRecord"] = {} - - @classmethod - def shutdown(cls, wait: bool = True) -> None: - SharedAsyncScheduler.shutdown(wait) - SharedScheduler.shutdown(wait) - cls._writer_registry.clear() - - @classmethod - def configure( - cls, cache: CacheStorage | Callable[[], CacheStorage] - ) -> type[AsyncBackgroundCache]: - """ - Create a configured version of BGCache with a default cache backend. 
- """ - - class ConfiguredBGCache(cls): - @classmethod - def register_loader( - cls_inner, - key: str, - interval_seconds: int, - ttl: int | None = None, - run_immediately: bool = True, - on_error: Callable[[Exception], None] | None = None, - cache: CacheStorage | Callable[[], CacheStorage] | None = None, - ) -> Callable[[Callable[[], T]], Callable[[], T]]: - return cls.register_loader( - key, - interval_seconds, - ttl=ttl, - run_immediately=run_immediately, - on_error=on_error, - cache=cache or cls_inner._configured_cache, - ) - - ConfiguredBGCache._configured_cache = cache # type: ignore - return ConfiguredBGCache - - @classmethod - def register_loader( - cls, - key: str, - interval_seconds: int, - ttl: int | None = None, - run_immediately: bool = True, - on_error: Callable[[Exception], None] | None = None, - cache: CacheStorage | Callable[[], CacheStorage] | None = None, - metrics: MetricsCollector | None = None, - ) -> Callable[[Callable[[], T]], Callable[[], T]]: - """ - Register a background loader function. 
- - Args: - key: Cache key for the loaded data - interval_seconds: Refresh interval in seconds (0 = no background refresh) - ttl: Optional TTL for cached data (defaults to 2x interval_seconds) - run_immediately: Whether to load data immediately on first access - on_error: Optional error handler callback - cache: Optional cache backend (defaults to InMemCache) - metrics: Optional metrics collector for instrumentation - """ - cache_key = key - if interval_seconds <= 0: - interval_seconds = 0 - if ttl is None and interval_seconds > 0: - ttl = interval_seconds * 2 - if ttl is None: - ttl = 0 - - cache_factory = normalize_cache_factory(cache, default_factory=InMemCache) - cache_obj = cache_factory() - - # Wrap cache with instrumentation if metrics are provided - if metrics is not None: - cache_obj = InstrumentedStorage( - cache_obj, metrics, cache_key, {"decorator": "BGCache"} - ) - - cache_get = cache_obj.get - cache_set = cache_obj.set - - def decorator(loader_func: Callable[[], T]) -> Callable[[], T]: - if asyncio.iscoroutinefunction(loader_func): - loader_lock: asyncio.Lock | None = None - initial_load_done = False - initial_load_task: asyncio.Task[None] | None = None - - if interval_seconds <= 0 or ttl <= 0: - - async def async_wrapper() -> T: - return await loader_func() - - attach_wrapper_metadata( - async_wrapper, - loader_func, - cache_obj=cache_obj, - cache_key=cache_key, - ) - return async_wrapper # type: ignore - - async def refresh_job() -> None: - refresh_start = time.perf_counter() - success = False - try: - data = await loader_func() - cache_set(cache_key, data, ttl) - success = True - except Exception as e: - if on_error: - try: - on_error(e) - except Exception: - logger.exception( - "Async BGCache error handler failed for key %r", - cache_key, - ) - else: - logger.exception( - "Async BGCache refresh job failed for key %r", cache_key - ) - finally: - if metrics is not None: - refresh_duration = time.perf_counter() - refresh_start - 
metrics.record_background_refresh( - cache_key, - success, - refresh_duration, - {"decorator": "BGCache", "key": cache_key}, - ) - - next_run_time: datetime | None = None - - if run_immediately: - if cache_get(cache_key) is None: - try: - loop = asyncio.get_running_loop() - except RuntimeError: - asyncio.run(refresh_job()) - initial_load_done = True - next_run_time = datetime.now() + timedelta( - seconds=interval_seconds * 2 - ) - else: - initial_load_task = loop.create_task(refresh_job()) - next_run_time = datetime.now() + timedelta( - seconds=interval_seconds * 2 - ) - - scheduler = SharedAsyncScheduler.get_scheduler() - SharedAsyncScheduler.ensure_started() - scheduler.add_job( - refresh_job, - trigger=IntervalTrigger(seconds=interval_seconds), - id=cache_key, - replace_existing=True, - next_run_time=next_run_time, - ) - - async def async_wrapper() -> T: - nonlocal loader_lock, initial_load_done, initial_load_task - - value = cache_get(cache_key) - if value is not None: - return value - - # Miss path: serialize initial load / fallback loads. - # We create the asyncio.Lock lazily to avoid requiring a running - # loop at decoration/import time. - if loader_lock is None: - loader_lock = asyncio.Lock() - async with loader_lock: - value = cache_get(cache_key) - if value is not None: - return value - - # If we scheduled an initial refresh task, wait for it once. 
- if not initial_load_done: - if initial_load_task is not None: - await initial_load_task - elif not run_immediately: - await refresh_job() - initial_load_done = True - - value = cache_get(cache_key) - if value is not None: - return value - result = await loader_func() - cache_set(cache_key, result, ttl) - return result - - attach_wrapper_metadata( - async_wrapper, loader_func, cache_obj=cache_obj, cache_key=cache_key - ) - return async_wrapper # type: ignore - - # Sync wrapper - from threading import Lock - - sync_lock = Lock() - sync_initial_load_done = False - - if interval_seconds <= 0 or ttl <= 0: - - def sync_wrapper() -> T: - return loader_func() - - attach_wrapper_metadata( - sync_wrapper, loader_func, cache_obj=cache_obj, cache_key=cache_key - ) - return sync_wrapper - - def sync_refresh_job() -> None: - refresh_start = time.perf_counter() - success = False - try: - data = loader_func() - cache_set(cache_key, data, ttl) - success = True - except Exception as e: - if on_error: - try: - on_error(e) - except Exception: - logger.exception( - "Sync BGCache error handler failed for key %r", - cache_key, - ) - else: - logger.exception( - "Sync BGCache refresh job failed for key %r", cache_key - ) - finally: - if metrics is not None: - refresh_duration = time.perf_counter() - refresh_start - metrics.record_background_refresh( - cache_key, - success, - refresh_duration, - {"decorator": "BGCache", "key": cache_key}, - ) - - next_run_time_sync: datetime | None = None - - if run_immediately: - if cache_get(cache_key) is None: - sync_refresh_job() - sync_initial_load_done = True - next_run_time_sync = datetime.now() + timedelta( - seconds=interval_seconds * 2 - ) - - scheduler_sync = SharedScheduler.get_scheduler() - SharedScheduler.start() - scheduler_sync.add_job( - sync_refresh_job, - trigger=IntervalTrigger(seconds=interval_seconds), - id=cache_key, - replace_existing=True, - next_run_time=next_run_time_sync, - ) - - def sync_wrapper_fn() -> T: - nonlocal 
sync_initial_load_done - value = cache_get(cache_key) - if value is not None: - return value - - with sync_lock: - value = cache_get(cache_key) - if value is not None: - return value - - if not sync_initial_load_done: - if not run_immediately: - sync_refresh_job() - sync_initial_load_done = True - - value = cache_get(cache_key) - if value is not None: - return value - result = loader_func() - cache_set(cache_key, result, ttl) - return result - - attach_wrapper_metadata( - sync_wrapper_fn, loader_func, cache_obj=cache_obj, cache_key=cache_key - ) - return sync_wrapper_fn - - return decorator - - @classmethod - def register_writer( - cls, - key: str, - interval_seconds: int, - ttl: int | None = None, - run_immediately: bool = True, - on_error: Callable[[Exception], None] | None = None, - cache: CacheStorage | Callable[[], CacheStorage] | None = None, - ) -> Callable[[Callable[[], T]], Callable[[], T]]: - cache_key = key - if cache_key in cls._writer_registry: - raise ValueError(f"BGCache writer already registered for key '{cache_key}'") - - if interval_seconds <= 0: - interval_seconds = 0 - if ttl is None and interval_seconds > 0: - ttl = interval_seconds * 2 - if ttl is None: - ttl = 0 - - cache_factory = normalize_cache_factory(cache, default_factory=InMemCache) - cache_obj = cache_factory() - cache_get = cache_obj.get - cache_set = cache_obj.set - - def decorator(loader_func: Callable[[], T]) -> Callable[[], T]: - if asyncio.iscoroutinefunction(loader_func): - loader_lock: asyncio.Lock | None = None - - async def run_once() -> T: - nonlocal loader_lock - if loader_lock is None: - loader_lock = asyncio.Lock() - async with loader_lock: - try: - data = await loader_func() - cache_set(cache_key, data, ttl) - return data - except Exception as e: # pragma: no cover - defensive - if on_error: - try: - on_error(e) - except Exception: - logger.exception( - "Async BGCache error handler failed for key %r", - cache_key, - ) - logger.exception( - "Async BGCache writer failed 
for key %r", cache_key - ) - raise - - async def refresh_job() -> None: - try: - await run_once() - except Exception: - # Error already handled/logged inside run_once - pass - - next_run_time: datetime | None = None - - if run_immediately: - if cache_get(cache_key) is None: - try: - loop = asyncio.get_running_loop() - except RuntimeError: - asyncio.run(refresh_job()) - next_run_time = datetime.now() + timedelta( - seconds=interval_seconds * 2 - ) - else: - loop.create_task(refresh_job()) - next_run_time = datetime.now() + timedelta( - seconds=interval_seconds * 2 - ) - - if interval_seconds > 0: - scheduler = SharedAsyncScheduler.get_scheduler() - SharedAsyncScheduler.ensure_started() - scheduler.add_job( - refresh_job, - trigger=IntervalTrigger(seconds=interval_seconds), - id=cache_key, - replace_existing=True, - next_run_time=next_run_time, - ) - - async def writer_wrapper() -> T: - value = cache_get(cache_key) - if value is not None: - return value # type: ignore[return-value] - return await run_once() - - attach_wrapper_metadata( - writer_wrapper, - loader_func, - cache_obj=cache_obj, - cache_key=cache_key, - ) - cls._writer_registry[cache_key] = _WriterRecord( - cache_key=cache_key, - cache=cache_obj, - ttl=ttl, - loader_wrapper=writer_wrapper, - is_async=True, - ) - return writer_wrapper # type: ignore - - # Sync writer path - from threading import Lock - - loader_lock = Lock() - - def run_once_sync() -> T: - with loader_lock: - try: - data = loader_func() - cache_set(cache_key, data, ttl) - return data - except Exception as e: # pragma: no cover - defensive - if on_error: - try: - on_error(e) - except Exception: - logger.exception( - "Sync BGCache error handler failed for key %r", - cache_key, - ) - logger.exception( - "Sync BGCache writer failed for key %r", cache_key - ) - raise - - def refresh_job_sync() -> None: - try: - run_once_sync() - except Exception: - # Error already handled/logged inside run_once_sync - pass - - next_run_time_sync: datetime | 
None = None - - if run_immediately: - if cache_get(cache_key) is None: - refresh_job_sync() - next_run_time_sync = datetime.now() + timedelta( - seconds=interval_seconds * 2 - ) - - if interval_seconds > 0: - scheduler_sync = SharedScheduler.get_scheduler() - SharedScheduler.start() - scheduler_sync.add_job( - refresh_job_sync, - trigger=IntervalTrigger(seconds=interval_seconds), - id=cache_key, - replace_existing=True, - next_run_time=next_run_time_sync, - ) - - def writer_wrapper_sync() -> T: - value = cache_get(cache_key) - if value is not None: - return value # type: ignore[return-value] - return run_once_sync() - - attach_wrapper_metadata( - writer_wrapper_sync, - loader_func, - cache_obj=cache_obj, - cache_key=cache_key, - ) - cls._writer_registry[cache_key] = _WriterRecord( - cache_key=cache_key, - cache=cache_obj, - ttl=ttl, - loader_wrapper=writer_wrapper_sync, - is_async=False, - ) - return writer_wrapper_sync # type: ignore - - return decorator - - @classmethod - def get_reader( - cls, - key: str, - interval_seconds: int, - ttl: int | None = None, - *, - run_immediately: bool = True, - on_error: Callable[[Exception], None] | None = None, - cache: CacheStorage | Callable[[], CacheStorage] | None = None, - ) -> Callable[[], T | None]: - cache_key = key - - if interval_seconds <= 0: - interval_seconds = 0 - if ttl is None and interval_seconds > 0: - ttl = interval_seconds * 2 - if ttl is None: - ttl = 0 - - # Source cache (shared/distributed) to pull from; local_cache used for fast reads. 
- source_cache_factory = normalize_cache_factory( - cache, default_factory=InMemCache - ) - source_cache = source_cache_factory() - local_cache = InMemCache() - source_get = source_cache.get - local_get = local_cache.get - local_set = local_cache.set - - def load_once() -> None: - try: - value = source_get(cache_key) - if value is not None: - local_set(cache_key, value, ttl) - except Exception as e: # pragma: no cover - defensive - if on_error: - try: - on_error(e) - except Exception: - logger.exception( - "BGCache reader on_error failed for key %r", cache_key - ) - else: - logger.exception( - "BGCache reader refresh failed for key %r", cache_key - ) - - if run_immediately and (interval_seconds > 0 or ttl > 0): - load_once() - - if interval_seconds > 0: - scheduler_sync = SharedScheduler.get_scheduler() - SharedScheduler.start() - scheduler_sync.add_job( - load_once, - trigger=IntervalTrigger(seconds=interval_seconds), - id=f"reader:{cache_key}", - replace_existing=True, - ) - - def read_only_reader() -> T | None: - value = local_get(cache_key) - if value is not None: - return value - # Fallback: pull once from source on demand if not already cached. 
- load_once() - return local_get(cache_key) - - attach_wrapper_metadata( - read_only_reader, - read_only_reader, - cache_obj=local_cache, - cache_key=cache_key, - ) - return read_only_reader # type: ignore - - -BGCache = AsyncBackgroundCache - - -@dataclass(slots=True) -class _WriterRecord: - cache_key: str - cache: CacheStorage - ttl: int - loader_wrapper: Callable[[], Any] | Callable[[], Any] - is_async: bool diff --git a/src/advanced_caching/metrics.py b/src/advanced_caching/metrics.py index 460cebf..268e072 100644 --- a/src/advanced_caching/metrics.py +++ b/src/advanced_caching/metrics.py @@ -13,7 +13,7 @@ import threading import time -from collections import defaultdict +from collections import defaultdict, deque from typing import Any, Protocol @@ -345,8 +345,8 @@ def __init__(self, max_latency_samples: int = 1000): self._deletes: dict[str, int] = defaultdict(int) self._errors: dict[tuple[str, str, str], int] = defaultdict(int) - # Store recent latencies for percentile calculation - self._latencies: dict[tuple[str, str], list[float]] = defaultdict(list) + # Fixed-size ring buffers for O(1) append + bounded memory. + self._latencies: dict[tuple[str, str], deque[float]] = {} self._max_samples = max_latency_samples # Memory usage (latest value per cache) @@ -403,12 +403,11 @@ def record_latency( ) -> None: key = (cache_name, operation) with self._lock: - samples = self._latencies[key] - samples.append(duration_seconds) - - # Keep only recent samples - if len(samples) > self._max_samples: - samples.pop(0) + buf = self._latencies.get(key) + if buf is None: + buf = deque(maxlen=self._max_samples) + self._latencies[key] = buf + buf.append(duration_seconds) def record_error( self, diff --git a/src/advanced_caching/serializers.py b/src/advanced_caching/serializers.py new file mode 100644 index 0000000..d822b58 --- /dev/null +++ b/src/advanced_caching/serializers.py @@ -0,0 +1,214 @@ +"""Serialization layer for cache backends. 
+ +Public symbols: + Serializer — protocol (duck-typed, no ABC overhead) + pack_entry — encode a CacheEntry to bytes + unpack_entry — decode bytes back to a CacheEntry + pickle — PickleSerializer singleton (handles full entry natively) + json — JsonSerializer singleton (orjson, value-only) + msgpack — MsgpackSerializer singleton (requires pip install msgpack) + protobuf(cls) — factory: returns a serializer for a protobuf message class + +Usage:: + + from advanced_caching import serializers, RedisCache + + store = RedisCache(client, serializer=serializers.json) + store = RedisCache(client, serializer=serializers.msgpack) + store = RedisCache(client, serializer=serializers.protobuf(MyProtoMessage)) +""" + +from __future__ import annotations + +import pickle as _pickle_mod +import struct +from typing import Any, Protocol, runtime_checkable + +from .storage.utils import CacheEntry + +# ────────────────────────────────────────────────────────────────────────────── +# Protocol +# ────────────────────────────────────────────────────────────────────────────── + + +@runtime_checkable +class Serializer(Protocol): + """Minimal serialization contract. + + Optional class attribute ``handles_entries: bool`` controls how + :func:`pack_entry` / :func:`unpack_entry` treat the serializer: + + * ``True`` — the serializer encodes the full :class:`~storage.utils.CacheEntry` + object (e.g. Pickle). No binary header is prepended. + * ``False`` (default) — the serializer encodes only the *value*. A 16-byte + binary header containing ``(fresh_until, created_at)`` as two 64-bit floats + is prepended so any serializer can be used without a custom schema. + """ + + def dumps(self, obj: Any) -> bytes: ... + def loads(self, data: bytes) -> Any: ... 
+ + +# ────────────────────────────────────────────────────────────────────────────── +# pack / unpack — single place that knows the wire format +# ────────────────────────────────────────────────────────────────────────────── + +# Header: two 64-bit big-endian floats → (fresh_until, created_at) = 16 bytes +_HDR = struct.Struct(">dd") +_HDR_SIZE = _HDR.size # 16 + + +def pack_entry(entry: CacheEntry, s: Serializer) -> bytes: + """Serialize *entry* to bytes using serializer *s*. + + Serializers with ``handles_entries=True`` encode the whole entry in one + pass. All others receive the value only; metadata is prepended as a + compact 16-byte struct header so **any** serializer works out of the box. + """ + if getattr(s, "handles_entries", False): + return s.dumps(entry) + return _HDR.pack(entry.fresh_until, entry.created_at) + s.dumps(entry.value) + + +def unpack_entry(data: bytes, s: Serializer) -> CacheEntry: + """Deserialize bytes produced by :func:`pack_entry` back to a CacheEntry.""" + if getattr(s, "handles_entries", False): + return s.loads(data) + fresh_until, created_at = _HDR.unpack_from(data) + value = s.loads(data[_HDR_SIZE:]) + return CacheEntry(value=value, fresh_until=fresh_until, created_at=created_at) + + +# ────────────────────────────────────────────────────────────────────────────── +# Built-in serializers +# ────────────────────────────────────────────────────────────────────────────── + + +class PickleSerializer: + """Pickle — serializes the full CacheEntry in one shot, no header needed.""" + + __slots__ = () + handles_entries: bool = True + + @staticmethod + def dumps(obj: Any) -> bytes: + return _pickle_mod.dumps(obj, protocol=_pickle_mod.HIGHEST_PROTOCOL) + + @staticmethod + def loads(data: bytes) -> Any: + return _pickle_mod.loads(data) + + +class JsonSerializer: + """JSON via orjson — value only; entry metadata lives in the binary header.""" + + __slots__ = () + handles_entries: bool = False + + @staticmethod + def dumps(obj: Any) -> bytes: 
+ import orjson # lazy — avoids hard dep at module import time + + return orjson.dumps(obj) + + @staticmethod + def loads(data: bytes) -> Any: + import orjson + + return orjson.loads(data) + + +class MsgpackSerializer: + """msgpack — value only; entry metadata lives in the binary header. + + Requires ``pip install msgpack``. + """ + + __slots__ = () + handles_entries: bool = False + + @staticmethod + def dumps(obj: Any) -> bytes: + try: + import msgpack + except ImportError as exc: + raise ImportError("msgpack required: pip install msgpack") from exc + return msgpack.packb(obj, use_bin_type=True) + + @staticmethod + def loads(data: bytes) -> Any: + try: + import msgpack + except ImportError as exc: + raise ImportError("msgpack required: pip install msgpack") from exc + return msgpack.unpackb(data, raw=False) + + +def protobuf(message_class: type) -> Serializer: + """Return a serializer for a protobuf *message_class*. + + The class must expose ``SerializeToString()`` / ``FromString(data)``, + which all generated protobuf classes do. + + Example:: + + from myproto import UserMessage + store = RedisCache(client, serializer=serializers.protobuf(UserMessage)) + + The entry metadata (TTL timestamps) is stored in the 16-byte binary header + prepended by :func:`pack_entry`; the protobuf bytes contain only the value. 
+ """ + + class _Protobuf: + __slots__ = ("_cls",) + handles_entries: bool = False + + def __init__(self, cls: type) -> None: + self._cls = cls + + def dumps(self, obj: Any) -> bytes: + return obj.SerializeToString() + + def loads(self, data: bytes) -> Any: + return self._cls.FromString(data) + + return _Protobuf(message_class) + + +# ────────────────────────────────────────────────────────────────────────────── +# Module-level singletons — use these directly +# ────────────────────────────────────────────────────────────────────────────── + +pickle: PickleSerializer = PickleSerializer() +json: JsonSerializer = JsonSerializer() +msgpack: MsgpackSerializer = MsgpackSerializer() + +_ALIASES: dict[str, Serializer] = { + "pickle": pickle, + "json": json, + "msgpack": msgpack, +} + + +def resolve(s: "Serializer | str | None") -> "Serializer": + """Resolve a serializer from an instance, string alias, or ``None`` (→ pickle). + + Accepted values: + * ``None`` → :data:`pickle` + * ``"json"`` → :data:`json` + * ``"msgpack"`` → :data:`msgpack` + * ``"pickle"`` → :data:`pickle` + * Any object with ``dumps``/``loads`` methods. + """ + if s is None: + return pickle + if isinstance(s, str): + try: + return _ALIASES[s] + except KeyError: + raise ValueError( + f"Unknown serializer alias {s!r}. 
Valid aliases: {list(_ALIASES)}" + ) from None + if callable(getattr(s, "dumps", None)) and callable(getattr(s, "loads", None)): + return s + raise TypeError("serializer must expose dumps/loads methods") diff --git a/src/advanced_caching/storage/__init__.py b/src/advanced_caching/storage/__init__.py index eb184f7..160f743 100644 --- a/src/advanced_caching/storage/__init__.py +++ b/src/advanced_caching/storage/__init__.py @@ -1,10 +1,6 @@ from .utils import ( CacheEntry, CacheStorage, - JsonSerializer, - PickleSerializer, - _BUILTIN_SERIALIZERS, - _hash_bytes, validate_cache_storage, InstrumentedStorage, ) @@ -19,10 +15,6 @@ __all__ = [ "CacheEntry", "CacheStorage", - "JsonSerializer", - "PickleSerializer", - "_BUILTIN_SERIALIZERS", - "_hash_bytes", "validate_cache_storage", "InstrumentedStorage", "InMemCache", diff --git a/src/advanced_caching/storage/chain.py b/src/advanced_caching/storage/chain.py index 11e84b2..3488b67 100644 --- a/src/advanced_caching/storage/chain.py +++ b/src/advanced_caching/storage/chain.py @@ -6,15 +6,60 @@ from .utils import CacheEntry, CacheStorage -class ChainCache: - """Composable multi-level cache (L1→L2→...→Ln).""" +class ChainCache(CacheStorage): + """Composable multi-level cache (L1→L2→...→Ln). - def __init__(self, levels: list[tuple[CacheStorage, int | None]]): + On a cache hit the value is promoted to all faster levels so subsequent + reads stay in the fastest tier. + + Prefer the :meth:`build` classmethod over the low-level constructor:: + + cache = ChainCache.build(InMemCache(), RedisCache(client), ttls=[60, 3600]) + """ + + def __init__(self, levels: list[tuple[CacheStorage, int | float | None]]): if not levels: raise ValueError("ChainCache requires at least one level") self.levels = levels + # Pre-compute capability flags to avoid per-call hasattr overhead. 
+ self._has_get_entry: list[bool] = [hasattr(c, "get_entry") for c, _ in levels] + self._has_set_entry: list[bool] = [hasattr(c, "set_entry") for c, _ in levels] + self._has_clear: list[bool] = [hasattr(c, "clear") for c, _ in levels] + + @classmethod + def build( + cls, + *caches: CacheStorage, + ttls: list[int | float | None] | None = None, + ) -> ChainCache: + """Ergonomic constructor for multi-level caches. + + Args: + *caches: Cache backends ordered from fastest (L1) to slowest (Ln). + ttls: Optional per-level TTL overrides (same length as caches). + ``None`` entries mean "use the caller-supplied TTL as-is". + + Example:: - def _level_ttl(self, level_ttl: int | None, ttl: int) -> int: + cache = ChainCache.build( + InMemCache(), + RedisCache(client), + ttls=[60, 3600], + ) + """ + if not caches: + raise ValueError("At least one cache backend is required") + if ttls is None: + ttls = [None] * len(caches) + elif len(ttls) != len(caches): + raise ValueError( + f"ttls length ({len(ttls)}) must match number of caches ({len(caches)})" + ) + return cls(list(zip(caches, ttls))) + + def _level_ttl( + self, level_ttl: int | float | None, ttl: int | float + ) -> int | float: if level_ttl is None: return ttl if ttl <= 0: @@ -35,7 +80,7 @@ def get(self, key: str) -> Any | None: cache.set(key, hit_value, self._level_ttl(lvl_ttl, 0)) return hit_value - def set(self, key: str, value: Any, ttl: int = 0) -> None: + def set(self, key: str, value: Any, ttl: int | float = 0) -> None: for cache, lvl_ttl in self.levels: cache.set(key, value, self._level_ttl(lvl_ttl, ttl)) @@ -46,13 +91,19 @@ def delete(self, key: str) -> None: except Exception: pass + def clear(self) -> None: + """Clear all entries from every level that supports it.""" + for idx, (cache, _) in enumerate(self.levels): + if self._has_clear[idx]: + cache.clear() # type: ignore[union-attr] + def exists(self, key: str) -> bool: return any(cache.exists(key) for cache, _ in self.levels) - def get_entry(self, key: str) -> 
CacheEntry | None: + def get_entry(self, key: str, now: float | None = None) -> CacheEntry | None: hit_entry, hit_index = None, None for idx, (cache, lvl_ttl) in enumerate(self.levels): - if hasattr(cache, "get_entry"): + if self._has_get_entry[idx]: entry = cache.get_entry(key) # type: ignore[attr-defined] else: value = cache.get(key) @@ -69,30 +120,30 @@ def get_entry(self, key: str) -> CacheEntry | None: return None for promote_idx in range(0, hit_index): cache, lvl_ttl = self.levels[promote_idx] - if hasattr(cache, "set_entry"): - cache.set_entry( + if self._has_set_entry[promote_idx]: + cache.set_entry( # type: ignore[attr-defined] key, hit_entry, - ttl=self._level_ttl( - lvl_ttl, int(hit_entry.fresh_until - time.time()) - ), - ) # type: ignore[attr-defined] + ttl=self._level_ttl(lvl_ttl, hit_entry.fresh_until - time.time()), + ) else: cache.set(key, hit_entry.value, self._level_ttl(lvl_ttl, 0)) return hit_entry - def set_entry(self, key: str, entry: CacheEntry, ttl: int | None = None) -> None: - for cache, lvl_ttl in self.levels: + def set_entry( + self, key: str, entry: CacheEntry, ttl: int | float | None = None + ) -> None: + for idx, (cache, lvl_ttl) in enumerate(self.levels): effective_ttl = self._level_ttl( lvl_ttl, - ttl if ttl is not None else int(entry.fresh_until - time.time()), + ttl if ttl is not None else entry.fresh_until - time.time(), ) - if hasattr(cache, "set_entry"): + if self._has_set_entry[idx]: cache.set_entry(key, entry, ttl=effective_ttl) # type: ignore[attr-defined] else: cache.set(key, entry.value, effective_ttl) - def set_if_not_exists(self, key: str, value: Any, ttl: int) -> bool: + def set_if_not_exists(self, key: str, value: Any, ttl: int | float) -> bool: *upper_levels, deepest = self.levels[:-1], self.levels[-1] deep_cache, deep_ttl = deepest deep_success = deep_cache.set_if_not_exists( diff --git a/src/advanced_caching/storage/gcs_cache.py b/src/advanced_caching/storage/gcs_cache.py index abdf272..cc7a6f4 100644 --- 
a/src/advanced_caching/storage/gcs_cache.py +++ b/src/advanced_caching/storage/gcs_cache.py @@ -1,10 +1,18 @@ from __future__ import annotations import gzip +import hashlib +import time from concurrent.futures import ThreadPoolExecutor from typing import Any -from .utils import CacheEntry, Serializer, _BUILTIN_SERIALIZERS, _hash_bytes +from .utils import CacheEntry, CacheStorage +from ..serializers import ( + Serializer, + pack_entry, + unpack_entry, + resolve as _resolve_serializer, +) try: from google.cloud import storage as gcs @@ -12,95 +20,77 @@ gcs = None -class GCSCache: +def _hash_bytes(data: bytes) -> str: + return hashlib.blake2b(data, digest_size=16).hexdigest() + + +class GCSCache(CacheStorage): + """Google Cloud Storage-backed cache. + + Pass any :class:`~advanced_caching.serializers.Serializer` instance. + """ + def __init__( self, bucket: str, prefix: str = "", client: Any | None = None, - serializer: str | Serializer | None = "pickle", + serializer: Serializer | None = None, compress: bool = True, compress_level: int = 6, dedupe_writes: bool = False, ): if gcs is None: raise ImportError( - "google-cloud-storage required for GCSCache. Install: pip install google-cloud-storage" + "google-cloud-storage required for GCSCache. " + "Install: pip install google-cloud-storage" ) self.bucket_name = bucket self.prefix = prefix self.client = client or gcs.Client() self.bucket = self.client.bucket(bucket) + self._ser = _resolve_serializer(serializer) self.compress = compress self.compress_level = compress_level - self.serializer = self._resolve_serializer(serializer) self._dedupe_writes = dedupe_writes - def _resolve_serializer(self, serializer: str | Serializer | None) -> Serializer: - if serializer is None: - serializer = "pickle" - if isinstance(serializer, str): - name = serializer.lower() - if name not in _BUILTIN_SERIALIZERS: - raise ValueError("Unsupported serializer. 
Use 'pickle' or 'json'.") - return _BUILTIN_SERIALIZERS[name] - if hasattr(serializer, "dumps") and hasattr(serializer, "loads"): - return serializer - raise TypeError("serializer must be a string or provide dumps/loads methods") - def _make_blob(self, key: str): - path = f"{self.prefix}{key}" - return self.bucket.blob(path) - - def _serialize(self, value: Any) -> bytes: - data = self.serializer.dumps(value) - if self.compress: - return gzip.compress(data, compresslevel=self.compress_level) - return data + return self.bucket.blob(f"{self.prefix}{key}") + + def _encode(self, entry: CacheEntry) -> bytes: + data = pack_entry(entry, self._ser) + return ( + gzip.compress(data, compresslevel=self.compress_level) + if self.compress + else data + ) - def _deserialize(self, data: bytes) -> Any: - if self.compress: - data = gzip.decompress(data) - return self.serializer.loads(data) + def _decode(self, raw: bytes) -> CacheEntry | None: + try: + data = gzip.decompress(raw) if self.compress else raw + return unpack_entry(data, self._ser) + except Exception: + return None def get(self, key: str) -> Any | None: blob = self._make_blob(key) try: - data = blob.download_as_bytes() - value = self._deserialize(data) - if isinstance(value, dict) and value.get("__ac_type") == "entry": - entry = CacheEntry( - value=value.get("v"), - fresh_until=float(value.get("f", 0.0)), - created_at=float(value.get("c", 0.0)), - ) - return entry.value if entry.is_fresh() else None - return value + entry = self._decode(blob.download_as_bytes()) + if entry is None: + return None + return entry.value if entry.is_fresh() else None except Exception: return None - def set(self, key: str, value: Any, ttl: int = 0) -> None: - blob = self._make_blob(key) - import time - + def set(self, key: str, value: Any, ttl: int | float = 0) -> None: now = time.time() - entry: CacheEntry | None = None - if isinstance(value, CacheEntry): - entry = value - elif ttl != 0: - entry = CacheEntry(value=value, fresh_until=now + ttl, 
created_at=now) - - payload = ( - { - "__ac_type": "entry", - "v": entry.value, - "f": entry.fresh_until, - "c": entry.created_at, - } - if entry - else value + entry = CacheEntry( + value=value, + fresh_until=now + ttl if ttl > 0 else float("inf"), + created_at=now, ) - data = self._serialize(payload) + data = self._encode(entry) + blob = self._make_blob(key) try: if self._dedupe_writes: try: @@ -119,52 +109,35 @@ def set(self, key: str, value: Any, ttl: int = 0) -> None: raise RuntimeError(f"GCSCache set failed: {e}") def delete(self, key: str) -> None: - blob = self._make_blob(key) try: - blob.delete() + self._make_blob(key).delete() except Exception: pass def exists(self, key: str) -> bool: - blob = self._make_blob(key) try: - blob.reload() + self._make_blob(key).reload() return True except Exception: return False - def get_entry(self, key: str) -> CacheEntry | None: - blob = self._make_blob(key) + def get_entry(self, key: str, now: float | None = None) -> CacheEntry | None: try: - data = blob.download_as_bytes() - value = self._deserialize(data) - if isinstance(value, dict) and value.get("__ac_type") == "entry": - entry = CacheEntry( - value=value.get("v"), - fresh_until=float(value.get("f", 0.0)), - created_at=float(value.get("c", 0.0)), - ) - return entry - import time - - now = time.time() - return CacheEntry(value=value, fresh_until=float("inf"), created_at=now) + return self._decode(self._make_blob(key).download_as_bytes()) except Exception: return None - def set_entry(self, key: str, entry: CacheEntry, ttl: int | None = None) -> None: - import time - + def set_entry( + self, key: str, entry: CacheEntry, ttl: int | float | None = None + ) -> None: if ttl is not None: now = time.time() - entry = CacheEntry(value=entry.value, fresh_until=now + ttl, created_at=now) - payload = { - "__ac_type": "entry", - "v": entry.value, - "f": entry.fresh_until, - "c": entry.created_at, - } - data = self._serialize(payload) + entry = CacheEntry( + value=entry.value, + 
fresh_until=now + ttl if ttl > 0 else float("inf"), + created_at=now, + ) + data = self._encode(entry) blob = self._make_blob(key) try: if self._dedupe_writes: @@ -183,30 +156,36 @@ def set_entry(self, key: str, entry: CacheEntry, ttl: int | None = None) -> None except Exception as e: raise RuntimeError(f"GCSCache set_entry failed: {e}") - def set_if_not_exists(self, key: str, value: Any, ttl: int) -> bool: - blob = self._make_blob(key) + def set_if_not_exists(self, key: str, value: Any, ttl: int | float) -> bool: + now = time.time() + entry = CacheEntry( + value=value, + fresh_until=now + ttl if ttl > 0 else float("inf"), + created_at=now, + ) try: - blob.upload_from_string(self._serialize(value), if_generation_match=0) + self._make_blob(key).upload_from_string( + self._encode(entry), if_generation_match=0 + ) return True except Exception: return False def get_many(self, keys: list[str]) -> dict[str, Any]: """Parallel fetch using threads.""" - results = {} - with ThreadPoolExecutor(max_workers=min(32, len(keys) + 1)) as executor: - future_to_key = {executor.submit(self.get, key): key for key in keys} - for future in future_to_key: - key = future_to_key[future] + results: dict[str, Any] = {} + with ThreadPoolExecutor(max_workers=min(32, len(keys) + 1)) as ex: + future_to_key = {ex.submit(self.get, k): k for k in keys} + for future, k in future_to_key.items(): try: val = future.result() if val is not None: - results[key] = val + results[k] = val except Exception: pass return results - def set_many(self, mapping: dict[str, Any], ttl: int = 0) -> None: + def set_many(self, mapping: dict[str, Any], ttl: int | float = 0) -> None: """Parallel set using threads.""" - with ThreadPoolExecutor(max_workers=min(32, len(mapping) + 1)) as executor: - executor.map(lambda item: self.set(item[0], item[1], ttl), mapping.items()) + with ThreadPoolExecutor(max_workers=min(32, len(mapping) + 1)) as ex: + ex.map(lambda item: self.set(item[0], item[1], ttl), mapping.items()) diff --git 
a/src/advanced_caching/storage/hybrid.py b/src/advanced_caching/storage/hybrid.py index 04c0eaf..bf36767 100644 --- a/src/advanced_caching/storage/hybrid.py +++ b/src/advanced_caching/storage/hybrid.py @@ -6,15 +6,15 @@ from .utils import CacheEntry, CacheStorage -class HybridCache: +class HybridCache(CacheStorage): """Two-level cache: L1 (InMem) + L2 (distributed).""" def __init__( self, l1_cache: CacheStorage | None = None, l2_cache: CacheStorage | None = None, - l1_ttl: int = 60, - l2_ttl: int | None = None, + l1_ttl: int | float = 60, + l2_ttl: int | float | None = None, ): if l2_cache is None: raise ValueError("l2_cache is required for HybridCache") @@ -22,6 +22,11 @@ def __init__( self.l2 = l2_cache self.l1_ttl = l1_ttl self.l2_ttl = l2_ttl if l2_ttl is not None else l1_ttl * 2 + # Pre-compute capability flags — checked once, not per-call. + self._l2_has_get_entry: bool = hasattr(l2_cache, "get_entry") + self._l2_has_set_entry: bool = hasattr(l2_cache, "set_entry") + self._l1_has_clear: bool = l1_cache is not None and hasattr(l1_cache, "clear") + self._l2_has_clear: bool = hasattr(l2_cache, "clear") def get(self, key: str) -> Any | None: value = self.l1.get(key) if self.l1 else None @@ -32,13 +37,13 @@ def get(self, key: str) -> Any | None: self.l1.set(key, value, self.l1_ttl) return value - def set(self, key: str, value: Any, ttl: int = 0) -> None: + def set(self, key: str, value: Any, ttl: int | float = 0) -> None: if self.l1: self.l1.set(key, value, min(ttl, self.l1_ttl) if ttl > 0 else self.l1_ttl) l2_ttl = min(ttl, self.l2_ttl) if ttl > 0 else self.l2_ttl self.l2.set(key, value, l2_ttl) - def get_entry(self, key: str) -> CacheEntry | None: + def get_entry(self, key: str, now: float | None = None) -> CacheEntry | None: entry = ( self.l1.get_entry(key) if self.l1 and hasattr(self.l1, "get_entry") @@ -46,7 +51,7 @@ def get_entry(self, key: str) -> CacheEntry | None: ) if entry is not None: return entry - entry = self.l2.get_entry(key) if hasattr(self.l2, 
"get_entry") else None + entry = self.l2.get_entry(key) if self._l2_has_get_entry else None # type: ignore[attr-defined] if entry is not None and self.l1: self.l1.set_entry(key, entry, ttl=self.l1_ttl) return entry @@ -68,23 +73,49 @@ def delete(self, key: str) -> None: self.l1.delete(key) self.l2.delete(key) + def clear(self) -> None: + """Clear all entries from both cache levels.""" + if self._l1_has_clear and self.l1: + self.l1.clear() # type: ignore[union-attr] + if self._l2_has_clear: + self.l2.clear() # type: ignore[union-attr] + def exists(self, key: str) -> bool: return (self.l1.exists(key) if self.l1 else False) or self.l2.exists(key) - def set_if_not_exists(self, key: str, value: Any, ttl: int) -> bool: + def set_if_not_exists(self, key: str, value: Any, ttl: int | float) -> bool: l2_ttl = min(ttl, self.l2_ttl) if ttl > 0 else self.l2_ttl success = self.l2.set_if_not_exists(key, value, l2_ttl) if success and self.l1: self.l1.set(key, value, min(ttl, self.l1_ttl) if ttl > 0 else self.l1_ttl) return success - def set_entry(self, key: str, entry: CacheEntry, ttl: int | None = None) -> None: - ttl = ttl if ttl is not None else max(int(entry.fresh_until - time.time()), 0) + def set_entry( + self, key: str, entry: CacheEntry, ttl: int | float | None = None + ) -> None: + ttl = ttl if ttl is not None else max(entry.fresh_until - time.time(), 0) l1_ttl = min(ttl, self.l1_ttl) if ttl > 0 else self.l1_ttl l2_ttl = min(ttl, self.l2_ttl) if ttl > 0 else self.l2_ttl if self.l1: self.l1.set_entry(key, entry, ttl=l1_ttl) - if hasattr(self.l2, "set_entry"): - self.l2.set_entry(key, entry, ttl=l2_ttl) + if self._l2_has_set_entry: + self.l2.set_entry(key, entry, ttl=l2_ttl) # type: ignore[attr-defined] else: self.l2.set(key, entry.value, l2_ttl) + + def get_memory_usage(self) -> dict[str, Any]: + """Aggregate memory usage from L1 and L2 caches.""" + total_bytes = 0 + total_entries = 0 + + for cache in [self.l1, self.l2]: + if cache and hasattr(cache, "get_memory_usage"): + 
usage = cache.get_memory_usage() + total_bytes += usage.get("bytes_used", 0) + total_entries += usage.get("entry_count", 0) + + return { + "bytes_used": total_bytes, + "entry_count": total_entries, + "avg_entry_size": total_bytes / total_entries if total_entries > 0 else 0, + } diff --git a/src/advanced_caching/storage/inmem.py b/src/advanced_caching/storage/inmem.py index e68854d..efa46ea 100644 --- a/src/advanced_caching/storage/inmem.py +++ b/src/advanced_caching/storage/inmem.py @@ -5,121 +5,135 @@ import time from typing import Any -from .utils import CacheEntry +from .utils import CacheEntry, CacheStorage -class InMemCache: - """Thread-safe in-memory cache with TTL support.""" +class InMemCache(CacheStorage): + """Thread-safe in-memory cache with TTL support. - def __init__(self): + Hot-path design notes + --------------------- + * ``get`` / ``get_entry`` use an *optimistic lock-free read*: Python's GIL + guarantees ``dict.get`` is atomic at the C level, so we read without + acquiring the lock. The lock is only needed when we must *delete* a + stale entry (write path). + * ``get_entry`` accepts an optional pre-computed ``now`` so callers that + already hold a timestamp can avoid a second ``time.time()`` call. + * ``set`` / ``set_entry`` take the lock once and write in one step. 
+ """ + + __slots__ = ("_data", "_lock") + + def __init__(self) -> None: self._data: dict[str, CacheEntry] = {} - self._lock = threading.RLock() - self._memory_tracking_enabled = False + self._lock = threading.Lock() - def _make_entry(self, value: Any, ttl: int) -> CacheEntry: - now = time.time() - fresh_until = now + ttl if ttl > 0 else float("inf") - return CacheEntry(value=value, fresh_until=fresh_until, created_at=now) + # ── read ────────────────────────────────────────────────────────────────── def get(self, key: str) -> Any | None: + # Lock-free read — GIL makes dict.get atomic + entry = self._data.get(key) + if entry is None: + return None + if time.time() < entry.fresh_until: + return entry.value + # Stale: evict under lock (double-check to avoid redundant deletes) with self._lock: entry = self._data.get(key) - if entry is None: - return None - if time.time() >= entry.fresh_until: + if entry is not None and time.time() >= entry.fresh_until: del self._data[key] - return None - return entry.value + return None - def set(self, key: str, value: Any, ttl: int = 0) -> None: - entry = self._make_entry(value, ttl) - with self._lock: - self._data[key] = entry + def get_entry(self, key: str, now: float | None = None) -> CacheEntry | None: + """Return the raw ``CacheEntry`` — used by SWR for staleness checks. - def delete(self, key: str) -> None: - with self._lock: - self._data.pop(key, None) + Returns the entry even if stale so callers (e.g., SWR) can read + ``entry.fresh_until`` and ``entry.created_at`` to decide whether to + serve stale data and schedule a background refresh. + + The optional *now* parameter is accepted for API compatibility but + is not used for freshness filtering here. 
+ """ + return self._data.get(key) def exists(self, key: str) -> bool: - return self.get(key) is not None + entry = self._data.get(key) + if entry is None: + return False + return time.time() < entry.fresh_until - def get_entry(self, key: str) -> CacheEntry | None: + # ── write ───────────────────────────────────────────────────────────────── + + def set(self, key: str, value: Any, ttl: int | float = 0) -> None: + now = time.time() + fresh_until = now + ttl if ttl > 0 else float("inf") + entry = CacheEntry(value=value, fresh_until=fresh_until, created_at=now) with self._lock: - return self._data.get(key) + self._data[key] = entry - def set_entry(self, key: str, entry: CacheEntry, ttl: int | None = None) -> None: + def set_entry( + self, key: str, entry: CacheEntry, ttl: int | float | None = None + ) -> None: if ttl is not None: - entry = self._make_entry(entry.value, ttl) + now = time.time() + fresh_until = now + ttl if ttl > 0 else float("inf") + entry = CacheEntry( + value=entry.value, fresh_until=fresh_until, created_at=now + ) with self._lock: self._data[key] = entry - def set_if_not_exists(self, key: str, value: Any, ttl: int) -> bool: + def set_if_not_exists(self, key: str, value: Any, ttl: int | float) -> bool: + # Fast lock-free check first + entry = self._data.get(key) + if entry is not None and time.time() < entry.fresh_until: + return False with self._lock: - now = time.time() - if key in self._data and self._data[key].is_fresh(now): + entry = self._data.get(key) + if entry is not None and time.time() < entry.fresh_until: return False - entry = self._make_entry(value, ttl) - self._data[key] = entry + now = time.time() + fresh_until = now + ttl if ttl > 0 else float("inf") + self._data[key] = CacheEntry( + value=value, fresh_until=fresh_until, created_at=now + ) return True + def delete(self, key: str) -> None: + with self._lock: + self._data.pop(key, None) + def clear(self) -> None: with self._lock: self._data.clear() + # ── maintenance 
─────────────────────────────────────────────────────────── + def cleanup_expired(self) -> int: with self._lock: now = time.time() - expired_keys = [k for k, e in self._data.items() if e.fresh_until < now] - for k in expired_keys: + expired = [k for k, e in self._data.items() if e.fresh_until <= now] + for k in expired: del self._data[k] - return len(expired_keys) - - @property - def lock(self): - return self._lock + return len(expired) def get_memory_usage(self) -> dict[str, Any]: - """ - Calculate approximate memory usage of the cache. - - Returns: - dict with keys: - - bytes_used: Estimated memory usage in bytes - - entry_count: Number of entries in cache - - avg_entry_size: Average size per entry in bytes - - Note: This is an approximation using sys.getsizeof() which doesn't - account for shared object references. For production monitoring, - consider enabling memory tracking which provides more accurate estimates. - """ + """Approximate memory usage of the cache (shallow sizing).""" with self._lock: if not self._data: - return { - "bytes_used": 0, - "entry_count": 0, - "avg_entry_size": 0, - } - - # Calculate total memory usage - # Dict overhead + keys + entries - total_bytes = sys.getsizeof(self._data) - + return {"bytes_used": 0, "entry_count": 0, "avg_entry_size": 0} + total = sys.getsizeof(self._data) for key, entry in self._data.items(): - # Key size - total_bytes += sys.getsizeof(key) - # Entry object overhead - total_bytes += sys.getsizeof(entry) - # Entry value (approximate) - total_bytes += sys.getsizeof(entry.value) - # Entry metadata - total_bytes += sys.getsizeof(entry.fresh_until) - total_bytes += sys.getsizeof(entry.created_at) - - entry_count = len(self._data) - avg_size = total_bytes // entry_count if entry_count > 0 else 0 - + total += sys.getsizeof(key) + total += sys.getsizeof(entry) + total += sys.getsizeof(entry.value) + count = len(self._data) return { - "bytes_used": total_bytes, - "entry_count": entry_count, - "avg_entry_size": 
avg_size, + "bytes_used": total, + "entry_count": count, + "avg_entry_size": total // count, } + + @property + def lock(self) -> threading.Lock: + return self._lock diff --git a/src/advanced_caching/storage/local_file.py b/src/advanced_caching/storage/local_file.py index a4b1e90..51119e4 100644 --- a/src/advanced_caching/storage/local_file.py +++ b/src/advanced_caching/storage/local_file.py @@ -1,96 +1,79 @@ from __future__ import annotations +import gzip import os +import shutil import time from pathlib import Path from typing import Any -import gzip +from .utils import CacheEntry, CacheStorage +from ..serializers import ( + Serializer, + pack_entry, + unpack_entry, + resolve as _resolve_serializer, +) + + +class LocalFileCache(CacheStorage): + """Filesystem-backed cache with TTL, optional compression, and atomic writes. + + Pass any :class:`~advanced_caching.serializers.Serializer` instance. -from .utils import CacheEntry, Serializer, _BUILTIN_SERIALIZERS + Example:: + from advanced_caching import serializers, LocalFileCache -class LocalFileCache: - """Filesystem-backed cache with TTL and optional dedupe.""" + store = LocalFileCache("/tmp/mycache", serializer=serializers.json) + """ def __init__( self, root_dir: str | Path, - serializer: str | Serializer | None = "pickle", + serializer: Serializer | None = None, compress: bool = True, compress_level: int = 6, dedupe_writes: bool = False, ): self.root = Path(root_dir) self.root.mkdir(parents=True, exist_ok=True) + self._ser = _resolve_serializer(serializer) self.compress = compress self.compress_level = compress_level - self.serializer = self._resolve_serializer(serializer) self._dedupe_writes = dedupe_writes - def _resolve_serializer(self, serializer: str | Serializer | None) -> Serializer: - if serializer is None: - serializer = "pickle" - if isinstance(serializer, str): - name = serializer.lower() - if name not in _BUILTIN_SERIALIZERS: - raise ValueError("Unsupported serializer. 
Use 'pickle' or 'json'.") - return _BUILTIN_SERIALIZERS[name] - if hasattr(serializer, "dumps") and hasattr(serializer, "loads"): - return serializer - raise TypeError("serializer must be a string or provide dumps/loads methods") - def _path(self, key: str) -> Path: - return self.root / key - - def _serialize_entry(self, entry: CacheEntry) -> bytes: - payload = { - "__ac_type": "entry", - "v": entry.value, - "f": entry.fresh_until, - "c": entry.created_at, - } - data = self.serializer.dumps(payload) - if self.compress: - data = gzip.compress(data, compresslevel=self.compress_level) - return data - - def _deserialize_entry(self, data: bytes) -> CacheEntry | None: + safe = key.replace(os.sep, "_").replace("/", "_").replace("..", "_") + return self.root / safe + + def _encode(self, entry: CacheEntry) -> bytes: + data = pack_entry(entry, self._ser) + return ( + gzip.compress(data, compresslevel=self.compress_level) + if self.compress + else data + ) + + def _decode(self, raw: bytes) -> CacheEntry | None: try: - if self.compress: - data = gzip.decompress(data) - payload = self.serializer.loads(data) - if isinstance(payload, CacheEntry): - return payload - if isinstance(payload, dict) and payload.get("__ac_type") == "entry": - return CacheEntry( - value=payload.get("v"), - fresh_until=float(payload.get("f", 0.0)), - created_at=float(payload.get("c", 0.0)), - ) - now = time.time() - return CacheEntry(value=payload, fresh_until=float("inf"), created_at=now) + data = gzip.decompress(raw) if self.compress else raw + return unpack_entry(data, self._ser) except Exception: return None def _atomic_write(self, path: Path, data: bytes) -> None: path.parent.mkdir(parents=True, exist_ok=True) - tmp_path = path.with_suffix(path.suffix + ".tmp") - with open(tmp_path, "wb") as tmp: - tmp.write(data) - os.replace(tmp_path, path) + tmp = path.with_suffix(path.suffix + ".tmp") + tmp.write_bytes(data) + os.replace(tmp, path) - def get_entry(self, key: str) -> CacheEntry | None: + def 
get_entry(self, key: str, now: float | None = None) -> CacheEntry | None: path = self._path(key) if not path.exists(): return None - try: - entry = self._deserialize_entry(path.read_bytes()) - except Exception: - return None - if entry is None: - return None - if not entry.is_fresh(): + entry = self._decode(path.read_bytes()) + if entry is None or not entry.is_fresh(): try: path.unlink() except Exception: @@ -102,53 +85,68 @@ def get(self, key: str) -> Any | None: entry = self.get_entry(key) return entry.value if entry is not None else None - def set(self, key: str, value: Any, ttl: int = 0) -> None: + def set(self, key: str, value: Any, ttl: int | float = 0) -> None: now = time.time() - fresh_until = now + ttl if ttl > 0 else float("inf") - entry = CacheEntry(value=value, fresh_until=fresh_until, created_at=now) - data = self._serialize_entry(entry) + entry = CacheEntry( + value=value, + fresh_until=now + ttl if ttl > 0 else float("inf"), + created_at=now, + ) + data = self._encode(entry) path = self._path(key) - if self._dedupe_writes and ttl <= 0 and path.exists(): + if self._dedupe_writes and path.exists(): try: - existing_entry = self.get_entry(key) - if existing_entry is not None and existing_entry.value == value: + if path.read_bytes() == data: return except Exception: pass self._atomic_write(path, data) def delete(self, key: str) -> None: - path = self._path(key) try: - path.unlink() + self._path(key).unlink() except Exception: pass def exists(self, key: str) -> bool: return self.get_entry(key) is not None - def set_entry(self, key: str, entry: CacheEntry, ttl: int | None = None) -> None: - now = time.time() + def set_entry( + self, key: str, entry: CacheEntry, ttl: int | float | None = None + ) -> None: if ttl is not None: + now = time.time() entry = CacheEntry( value=entry.value, fresh_until=now + ttl if ttl > 0 else float("inf"), created_at=now, ) - data = self._serialize_entry(entry) + data = self._encode(entry) path = self._path(key) - if 
self._dedupe_writes and ttl is not None and ttl <= 0 and path.exists(): + if self._dedupe_writes and path.exists(): try: - existing_entry = self.get_entry(key) - if existing_entry is not None and existing_entry.value == entry.value: + if path.read_bytes() == data: return except Exception: pass self._atomic_write(path, data) - def set_if_not_exists(self, key: str, value: Any, ttl: int) -> bool: - existing = self.get_entry(key) - if existing is not None: + def set_if_not_exists(self, key: str, value: Any, ttl: int | float) -> bool: + if self.get_entry(key) is not None: return False self.set(key, value, ttl) return True + + def clear(self) -> None: + """Delete all cache files in the root directory.""" + try: + for path in self.root.iterdir(): + try: + if path.is_file(): + path.unlink() + elif path.is_dir(): + shutil.rmtree(path) + except Exception: + pass + except Exception: + pass diff --git a/src/advanced_caching/storage/redis_cache.py b/src/advanced_caching/storage/redis_cache.py index b5d51fb..d0b9166 100644 --- a/src/advanced_caching/storage/redis_cache.py +++ b/src/advanced_caching/storage/redis_cache.py @@ -4,7 +4,13 @@ import time from typing import Any -from .utils import CacheEntry, Serializer, _BUILTIN_SERIALIZERS +from .utils import CacheEntry, CacheStorage +from ..serializers import ( + Serializer, + pack_entry, + unpack_entry, + resolve as _resolve_serializer, +) try: import redis @@ -12,77 +18,39 @@ redis = None # type: ignore -class RedisCache: - """Redis-backed cache storage with optional dedupe writes.""" +class RedisCache(CacheStorage): + """Redis-backed cache storage. + + Pass any :class:`~advanced_caching.serializers.Serializer` instance, including + ``serializers.json``, ``serializers.msgpack``, or + ``serializers.protobuf(MyMessage)``. Defaults to pickle. 
+ + Example:: + + from advanced_caching import serializers, RedisCache + import redis + + store = RedisCache( + redis.from_url("redis://localhost"), + prefix="myapp:", + serializer=serializers.json, + ) + """ def __init__( self, redis_client: Any, prefix: str = "", - serializer: str | Serializer | None = "pickle", + serializer: Serializer | None = None, dedupe_writes: bool = False, ): if redis is None: raise ImportError("redis package required. Install: pip install redis") self.client = redis_client self.prefix = prefix - self._serializer, self._wrap_entries = self._resolve_serializer(serializer) + self._ser = _resolve_serializer(serializer) self._dedupe_writes = dedupe_writes - @staticmethod - def _wrap_payload(obj: Any) -> Any: - if isinstance(obj, CacheEntry): - return { - "__ac_type": "entry", - "v": obj.value, - "f": obj.fresh_until, - "c": obj.created_at, - } - return {"__ac_type": "value", "v": obj} - - @staticmethod - def _unwrap_payload(obj: Any) -> Any: - if isinstance(obj, dict): - obj_type = obj.get("__ac_type") - if obj_type == "entry": - return CacheEntry( - value=obj.get("v"), - fresh_until=float(obj.get("f", 0.0)), - created_at=float(obj.get("c", 0.0)), - ) - if obj_type == "value": - return obj.get("v") - return obj - - def _serialize(self, obj: Any) -> bytes: - if self._wrap_entries: - return self._serializer.dumps(self._wrap_payload(obj)) - return self._serializer.dumps(obj) - - def _deserialize(self, data: bytes) -> Any: - obj = self._serializer.loads(data) - if self._wrap_entries: - return self._unwrap_payload(obj) - return obj - - def _resolve_serializer( - self, serializer: str | Serializer | None - ) -> tuple[Serializer, bool]: - if serializer is None: - serializer = "pickle" - if isinstance(serializer, str): - name = serializer.lower() - if name not in _BUILTIN_SERIALIZERS: - raise ValueError("Unsupported serializer. 
Use 'pickle' or 'json'.") - serializer_obj = _BUILTIN_SERIALIZERS[name] - return serializer_obj, not bool( - getattr(serializer_obj, "handles_entries", False) - ) - if hasattr(serializer, "dumps") and hasattr(serializer, "loads"): - wrap = not bool(getattr(serializer, "handles_entries", False)) - return serializer, wrap - raise TypeError("serializer must be a string or provide dumps/loads methods") - def _make_key(self, key: str) -> str: return f"{self.prefix}{key}" @@ -91,26 +59,25 @@ def get(self, key: str) -> Any | None: data = self.client.get(self._make_key(key)) if data is None: return None - value = self._deserialize(data) - if isinstance(value, CacheEntry): - return value.value if value.is_fresh() else None - return value + entry = unpack_entry(data, self._ser) + return entry.value if entry.is_fresh() else None except Exception: return None - def set(self, key: str, value: Any, ttl: int = 0) -> None: + def set(self, key: str, value: Any, ttl: int | float = 0) -> None: try: - data = self._serialize(value) + now = time.time() + fresh_until = now + ttl if ttl > 0 else float("inf") + entry = CacheEntry(value=value, fresh_until=fresh_until, created_at=now) + data = pack_entry(entry, self._ser) if self._dedupe_writes: existing = self.client.get(self._make_key(key)) if existing is not None and existing == data: if ttl > 0: - expires = max(1, int(math.ceil(ttl))) - self.client.expire(self._make_key(key), expires) + self.client.expire(self._make_key(key), max(1, math.ceil(ttl))) return if ttl > 0: - expires = max(1, int(math.ceil(ttl))) - self.client.setex(self._make_key(key), expires, data) + self.client.setex(self._make_key(key), max(1, math.ceil(ttl)), data) else: self.client.set(self._make_key(key), data) except Exception as e: @@ -125,38 +92,38 @@ def delete(self, key: str) -> None: def exists(self, key: str) -> bool: try: entry = self.get_entry(key) - if entry is None: - return False - return entry.is_fresh() + return entry is not None and entry.is_fresh() 
except Exception: return False - def get_entry(self, key: str) -> CacheEntry | None: + def get_entry(self, key: str, now: float | None = None) -> CacheEntry | None: try: data = self.client.get(self._make_key(key)) if data is None: return None - value = self._deserialize(data) - if isinstance(value, CacheEntry): - return value - now = time.time() - return CacheEntry(value=value, fresh_until=float("inf"), created_at=now) + return unpack_entry(data, self._ser) except Exception: return None - def set_entry(self, key: str, entry: CacheEntry, ttl: int | None = None) -> None: + def set_entry( + self, key: str, entry: CacheEntry, ttl: int | float | None = None + ) -> None: try: - data = self._serialize(entry) + if ttl is not None: + now = time.time() + entry = CacheEntry( + value=entry.value, + fresh_until=now + ttl if ttl > 0 else float("inf"), + created_at=now, + ) + data = pack_entry(entry, self._ser) if self._dedupe_writes: existing = self.client.get(self._make_key(key)) if existing is not None and existing == data: if ttl is not None and ttl > 0: - expires = max(1, int(math.ceil(ttl))) - self.client.expire(self._make_key(key), expires) + self.client.expire(self._make_key(key), max(1, math.ceil(ttl))) return - expires = None - if ttl is not None and ttl > 0: - expires = max(1, int(math.ceil(ttl))) + expires = max(1, math.ceil(ttl)) if ttl is not None and ttl > 0 else None if expires: self.client.setex(self._make_key(key), expires, data) else: @@ -164,13 +131,26 @@ def set_entry(self, key: str, entry: CacheEntry, ttl: int | None = None) -> None except Exception as e: raise RuntimeError(f"Redis set_entry failed: {e}") - def set_if_not_exists(self, key: str, value: Any, ttl: int) -> bool: + def set_if_not_exists(self, key: str, value: Any, ttl: int | float) -> bool: try: - data = self._serialize(value) - expires = None - if ttl > 0: - expires = max(1, int(math.ceil(ttl))) + now = time.time() + fresh_until = now + ttl if ttl > 0 else float("inf") + entry = 
CacheEntry(value=value, fresh_until=fresh_until, created_at=now) + data = pack_entry(entry, self._ser) + expires = max(1, math.ceil(ttl)) if ttl > 0 else None result = self.client.set(self._make_key(key), data, ex=expires, nx=True) return bool(result) except Exception: return False + + def clear(self) -> None: + """Delete all keys under this cache's prefix (or flushdb if no prefix).""" + try: + if self.prefix: + keys = self.client.keys(f"{self.prefix}*") + if keys: + self.client.delete(*keys) + else: + self.client.flushdb() + except Exception: + pass diff --git a/src/advanced_caching/storage/s3_cache.py b/src/advanced_caching/storage/s3_cache.py index 19fb1cd..2df0e24 100644 --- a/src/advanced_caching/storage/s3_cache.py +++ b/src/advanced_caching/storage/s3_cache.py @@ -1,11 +1,18 @@ from __future__ import annotations import gzip +import hashlib import time from concurrent.futures import ThreadPoolExecutor from typing import Any -from .utils import CacheEntry, Serializer, _BUILTIN_SERIALIZERS, _hash_bytes +from .utils import CacheEntry, CacheStorage +from ..serializers import ( + Serializer, + pack_entry, + unpack_entry, + resolve as _resolve_serializer, +) try: import boto3 @@ -13,13 +20,22 @@ boto3 = None -class S3Cache: +def _hash_bytes(data: bytes) -> str: + return hashlib.blake2b(data, digest_size=16).hexdigest() + + +class S3Cache(CacheStorage): + """S3-backed cache storage. + + Pass any :class:`~advanced_caching.serializers.Serializer` instance. 
+ """ + def __init__( self, bucket: str, prefix: str = "", s3_client: Any | None = None, - serializer: str | Serializer | None = "pickle", + serializer: Serializer | None = None, compress: bool = True, compress_level: int = 6, dedupe_writes: bool = False, @@ -29,93 +45,65 @@ def __init__( self.bucket = bucket self.prefix = prefix self.client = s3_client or boto3.client("s3") + self._ser = _resolve_serializer(serializer) self.compress = compress self.compress_level = compress_level - self.serializer = self._resolve_serializer(serializer) self._dedupe_writes = dedupe_writes - def _resolve_serializer(self, serializer: str | Serializer | None) -> Serializer: - if serializer is None: - serializer = "pickle" - if isinstance(serializer, str): - name = serializer.lower() - if name not in _BUILTIN_SERIALIZERS: - raise ValueError("Unsupported serializer. Use 'pickle' or 'json'.") - return _BUILTIN_SERIALIZERS[name] - if hasattr(serializer, "dumps") and hasattr(serializer, "loads"): - return serializer - raise TypeError("serializer must be a string or provide dumps/loads methods") - def _make_key(self, key: str) -> str: return f"{self.prefix}{key}" - def _serialize(self, value: Any) -> bytes: - data = self.serializer.dumps(value) - if self.compress: - return gzip.compress(data, compresslevel=self.compress_level) - return data + def _encode(self, entry: CacheEntry) -> bytes: + data = pack_entry(entry, self._ser) + return ( + gzip.compress(data, compresslevel=self.compress_level) + if self.compress + else data + ) - def _deserialize(self, data: bytes) -> Any: - if self.compress: - data = gzip.decompress(data) - return self.serializer.loads(data) + def _decode(self, raw: bytes) -> CacheEntry | None: + try: + data = gzip.decompress(raw) if self.compress else raw + return unpack_entry(data, self._ser) + except Exception: + return None def get(self, key: str) -> Any | None: try: obj = self.client.get_object(Bucket=self.bucket, Key=self._make_key(key)) - body = obj["Body"].read() - 
value = self._deserialize(body) - if isinstance(value, dict) and value.get("__ac_type") == "entry": - entry = CacheEntry( - value=value.get("v"), - fresh_until=float(value.get("f", 0.0)), - created_at=float(value.get("c", 0.0)), - ) - return entry.value if entry.is_fresh() else None - return value + entry = self._decode(obj["Body"].read()) + if entry is None: + return None + return entry.value if entry.is_fresh() else None except Exception: return None - def set(self, key: str, value: Any, ttl: int = 0) -> None: + def set(self, key: str, value: Any, ttl: int | float = 0) -> None: try: now = time.time() - entry: CacheEntry | None = None - if isinstance(value, CacheEntry): - entry = value - elif ttl != 0: - entry = CacheEntry(value=value, fresh_until=now + ttl, created_at=now) - - payload = ( - { - "__ac_type": "entry", - "v": entry.value, - "f": entry.fresh_until, - "c": entry.created_at, - } - if entry - else value + entry = CacheEntry( + value=value, + fresh_until=now + ttl if ttl > 0 else float("inf"), + created_at=now, ) - body = self._serialize(payload) - + body = self._encode(entry) if self._dedupe_writes: try: head = self.client.head_object( Bucket=self.bucket, Key=self._make_key(key) ) - if head and head.get("Metadata", {}).get("ac-hash") == _hash_bytes( - body - ): + if head.get("Metadata", {}).get("ac-hash") == _hash_bytes(body): return except Exception: pass - put_kwargs = { + kwargs: dict[str, Any] = { "Bucket": self.bucket, "Key": self._make_key(key), "Body": body, } if self._dedupe_writes: - put_kwargs["Metadata"] = {"ac-hash": _hash_bytes(body)} - self.client.put_object(**put_kwargs) + kwargs["Metadata"] = {"ac-hash": _hash_bytes(body)} + self.client.put_object(**kwargs) except Exception as e: raise RuntimeError(f"S3Cache set failed: {e}") @@ -132,58 +120,46 @@ def exists(self, key: str) -> bool: except Exception: return False - def get_entry(self, key: str) -> CacheEntry | None: + def get_entry(self, key: str, now: float | None = None) -> CacheEntry 
| None: try: obj = self.client.get_object(Bucket=self.bucket, Key=self._make_key(key)) - body = obj["Body"].read() - value = self._deserialize(body) - if isinstance(value, dict) and value.get("__ac_type") == "entry": - entry = CacheEntry( - value=value.get("v"), - fresh_until=float(value.get("f", 0.0)), - created_at=float(value.get("c", 0.0)), - ) - return entry - now = time.time() - return CacheEntry(value=value, fresh_until=float("inf"), created_at=now) + return self._decode(obj["Body"].read()) except Exception: return None - def set_entry(self, key: str, entry: CacheEntry, ttl: int | None = None) -> None: + def set_entry( + self, key: str, entry: CacheEntry, ttl: int | float | None = None + ) -> None: if ttl is not None: now = time.time() - entry = CacheEntry(value=entry.value, fresh_until=now + ttl, created_at=now) - payload = { - "__ac_type": "entry", - "v": entry.value, - "f": entry.fresh_until, - "c": entry.created_at, - } + entry = CacheEntry( + value=entry.value, + fresh_until=now + ttl if ttl > 0 else float("inf"), + created_at=now, + ) try: - body = self._serialize(payload) + body = self._encode(entry) if self._dedupe_writes: try: head = self.client.head_object( Bucket=self.bucket, Key=self._make_key(key) ) - if head and head.get("Metadata", {}).get("ac-hash") == _hash_bytes( - body - ): + if head.get("Metadata", {}).get("ac-hash") == _hash_bytes(body): return except Exception: pass - put_kwargs = { + kwargs: dict[str, Any] = { "Bucket": self.bucket, "Key": self._make_key(key), "Body": body, } if self._dedupe_writes: - put_kwargs["Metadata"] = {"ac-hash": _hash_bytes(body)} - self.client.put_object(**put_kwargs) + kwargs["Metadata"] = {"ac-hash": _hash_bytes(body)} + self.client.put_object(**kwargs) except Exception as e: raise RuntimeError(f"S3Cache set_entry failed: {e}") - def set_if_not_exists(self, key: str, value: Any, ttl: int) -> bool: + def set_if_not_exists(self, key: str, value: Any, ttl: int | float) -> bool: if self.exists(key): return False 
try: @@ -194,20 +170,19 @@ def set_if_not_exists(self, key: str, value: Any, ttl: int) -> bool: def get_many(self, keys: list[str]) -> dict[str, Any]: """Parallel fetch using threads.""" - results = {} - with ThreadPoolExecutor(max_workers=min(32, len(keys) + 1)) as executor: - future_to_key = {executor.submit(self.get, key): key for key in keys} - for future in future_to_key: - key = future_to_key[future] + results: dict[str, Any] = {} + with ThreadPoolExecutor(max_workers=min(32, len(keys) + 1)) as ex: + future_to_key = {ex.submit(self.get, k): k for k in keys} + for future, k in future_to_key.items(): try: val = future.result() if val is not None: - results[key] = val + results[k] = val except Exception: pass return results - def set_many(self, mapping: dict[str, Any], ttl: int = 0) -> None: + def set_many(self, mapping: dict[str, Any], ttl: int | float = 0) -> None: """Parallel set using threads.""" - with ThreadPoolExecutor(max_workers=min(32, len(mapping) + 1)) as executor: - executor.map(lambda item: self.set(item[0], item[1], ttl), mapping.items()) + with ThreadPoolExecutor(max_workers=min(32, len(mapping) + 1)) as ex: + ex.map(lambda item: self.set(item[0], item[1], ttl), mapping.items()) diff --git a/src/advanced_caching/storage/utils.py b/src/advanced_caching/storage/utils.py index 1c13978..20d023b 100644 --- a/src/advanced_caching/storage/utils.py +++ b/src/advanced_caching/storage/utils.py @@ -1,70 +1,15 @@ from __future__ import annotations -import gzip -import hashlib -import json -import math -import pickle +import datetime import sys import time from dataclasses import dataclass from typing import Any, Protocol, TYPE_CHECKING -import orjson - if TYPE_CHECKING: from ..metrics import MetricsCollector -class Serializer(Protocol): - """Simple serializer protocol used by cache backends.""" - - def dumps(self, obj: Any) -> bytes: ... - - def loads(self, data: bytes) -> Any: ... 
- - -class PickleSerializer: - """Pickle serializer using highest protocol (fastest, flexible).""" - - __slots__ = () - handles_entries = True - - @staticmethod - def dumps(obj: Any) -> bytes: - return pickle.dumps(obj, protocol=pickle.HIGHEST_PROTOCOL) - - @staticmethod - def loads(data: bytes) -> Any: - return pickle.loads(data) - - -class JsonSerializer: - """JSON serializer for text-friendly payloads (wraps CacheEntry). Uses orjson""" - - __slots__ = () - handles_entries = False - - @staticmethod - def dumps(obj: Any) -> bytes: - return orjson.dumps(obj) - - @staticmethod - def loads(data: bytes) -> Any: - return orjson.loads(data) - - -_BUILTIN_SERIALIZERS: dict[str, Serializer] = { - "pickle": PickleSerializer(), - "json": JsonSerializer(), -} - - -def _hash_bytes(data: bytes) -> str: - """Cheap content hash (blake2b) used to skip redundant writes.""" - return hashlib.blake2b(data, digest_size=16).hexdigest() - - @dataclass(slots=True) class CacheEntry: """Internal cache entry with TTL support.""" @@ -83,31 +28,39 @@ def age(self, now: float | None = None) -> float: now = time.time() return now - self.created_at + def __repr__(self) -> str: + if self.fresh_until == float("inf"): + expires = "never" + else: + expires = datetime.datetime.fromtimestamp(self.fresh_until).isoformat() + age_s = round(self.age(), 1) + return f"CacheEntry(value={self.value!r}, expires={expires}, age={age_s}s)" + class CacheStorage(Protocol): """Protocol for cache storage backends.""" def get(self, key: str) -> Any | None: ... - def set(self, key: str, value: Any, ttl: int = 0) -> None: ... + def set(self, key: str, value: Any, ttl: int | float = 0) -> None: ... def delete(self, key: str) -> None: ... def exists(self, key: str) -> bool: ... - def get_entry(self, key: str) -> CacheEntry | None: ... + def get_entry(self, key: str, now: float | None = None) -> CacheEntry | None: ... 
def set_entry( - self, key: str, entry: CacheEntry, ttl: int | None = None + self, key: str, entry: CacheEntry, ttl: int | float | None = None ) -> None: ... - def set_if_not_exists(self, key: str, value: Any, ttl: int) -> bool: ... + def set_if_not_exists(self, key: str, value: Any, ttl: int | float) -> bool: ... def get_many(self, keys: list[str]) -> dict[str, Any]: """Retrieve multiple keys at once. Default implementation is sequential.""" return {k: v for k in keys if (v := self.get(k)) is not None} - def set_many(self, mapping: dict[str, Any], ttl: int = 0) -> None: + def set_many(self, mapping: dict[str, Any], ttl: int | float = 0) -> None: """Set multiple keys at once. Default implementation is sequential.""" for k, v in mapping.items(): self.set(k, v, ttl) @@ -128,7 +81,7 @@ def validate_cache_storage(cache: Any) -> bool: ) -class InstrumentedStorage: +class InstrumentedStorage(CacheStorage): """ Wrapper that adds metrics collection to any CacheStorage backend. @@ -246,10 +199,11 @@ def exists(self, key: str) -> bool: # record separate metrics to avoid double-counting return self._storage.exists(key) - def get_entry(self, key: str) -> CacheEntry | None: + def get_entry(self, key: str, now: float | None = None) -> CacheEntry | None: start = time.perf_counter() try: - result = self._storage.get_entry(key) + inner = getattr(self._storage, "get_entry") + result = inner(key, now) if now is not None else inner(key) duration = time.perf_counter() - start if result is not None: @@ -410,3 +364,12 @@ def get_memory_usage(self) -> dict[str, Any]: def unwrapped_storage(self) -> CacheStorage: """Access the underlying storage backend directly.""" return self._storage + + def get_all_keys(self) -> list[str]: + """Retrieve all keys from the underlying storage if supported. + + Returns empty list if not supported. 
+ """ + if hasattr(self._storage, "get_all_keys"): + return self._storage.get_all_keys() + return [] diff --git a/tests/benchmark.py b/tests/benchmark.py index 8965631..8d286ba 100644 --- a/tests/benchmark.py +++ b/tests/benchmark.py @@ -1,241 +1,259 @@ """ -Benchmarks for advanced_caching (Async-only architecture). +Benchmark harness for advanced-caching hot paths. + +Usage: + uv run python tests/benchmark.py + BENCH_N=200000 uv run python tests/benchmark.py """ from __future__ import annotations import asyncio -import json import os -import random -import sys +import statistics import time -from dataclasses import dataclass -from datetime import datetime -from pathlib import Path -from statistics import mean, median, stdev -from typing import Dict, List - -from advanced_caching import BGCache, SWRCache, TTLCache +from typing import Any +from advanced_caching import cache, bg, InMemCache, ChainCache +from advanced_caching.metrics import InMemoryMetrics # --------------------------------------------------------------------------- -# Config + helpers +# Config # --------------------------------------------------------------------------- +N = int(os.getenv("BENCH_N", "100000")) +WARMUP = 1000 -def _env_int(name: str, default: int) -> int: - raw = os.getenv(name) - if raw is None or raw == "": - return default - return int(raw) - - -def _env_float(name: str, default: float) -> float: - raw = os.getenv(name) - if raw is None or raw == "": - return default - return float(raw) - - -@dataclass(frozen=True) -class Config: - seed: int = 12345 - work_ms: float = 5.0 - warmup: int = 10 - runs: int = 300 - mixed_key_space: int = 100 - mixed_runs: int = 500 - - -CFG = Config( - seed=_env_int("BENCH_SEED", 12345), - work_ms=_env_float("BENCH_WORK_MS", 5.0), - warmup=_env_int("BENCH_WARMUP", 10), - runs=_env_int("BENCH_RUNS", 300), - mixed_key_space=_env_int("BENCH_MIXED_KEY_SPACE", 100), - mixed_runs=_env_int("BENCH_MIXED_RUNS", 500), -) -RNG = random.Random(CFG.seed) - - 
-@dataclass(frozen=True) -class Stats: - label: str - notes: str - runs: int - median_ms: float - mean_ms: float - stdev_ms: float - - -async def async_io_bound_call(user_id: int) -> dict: - await asyncio.sleep(CFG.work_ms / 1000.0) - return {"id": user_id, "name": f"User{user_id}"} - - -async def _timed_async(fn, warmup: int, runs: int) -> List[float]: - for _ in range(warmup): - await fn() - out: List[float] = [] - for _ in range(runs): - t0 = time.perf_counter() - await fn() - out.append((time.perf_counter() - t0) * 1000.0) - return out - - -def stats_from_samples( - label: str, notes: str, runs: int, samples: List[float] -) -> Stats: - return Stats( - label, - notes, - runs, - median(samples), - mean(samples), - stdev(samples) if len(samples) > 1 else 0.0, - ) +def _timer(fn, n: int) -> tuple[float, float]: + """Return (total_seconds, ops_per_second).""" + for _ in range(WARMUP): + fn() + t0 = time.perf_counter() + for _ in range(n): + fn() + elapsed = time.perf_counter() - t0 + return elapsed, n / elapsed -def print_table(title: str, rows: List[Stats]) -> None: - print("\n" + title) - print("-" * len(title)) - print( - f"{'Strategy':<22} {'Median (ms)':>12} {'Mean (ms)':>12} {'Stdev (ms)':>12} Notes" - ) - for r in rows: - print( - f"{r.label:<22} {r.median_ms:>12.4f} {r.mean_ms:>12.4f} {r.stdev_ms:>12.4f} {r.notes}" - ) - - -def append_json_log( - status: str, error: str | None, sections: Dict[str, List[Stats]] -) -> None: - payload = { - "ts": datetime.now().isoformat(timespec="seconds"), - "status": status, - "error": error, - "command": "python " + " ".join(sys.argv), - "python": sys.version.split()[0], - "config": { - "seed": CFG.seed, - "work_ms": CFG.work_ms, - "warmup": CFG.warmup, - "runs": CFG.runs, - "mixed_key_space": CFG.mixed_key_space, - "mixed_runs": CFG.mixed_runs, - }, - "results": { - name: [ - { - "label": s.label, - "notes": s.notes, - "runs": s.runs, - "median_ms": round(s.median_ms, 6), - "mean_ms": round(s.mean_ms, 6), - "stdev_ms": 
round(s.stdev_ms, 6), - } - for s in rows - ] - for name, rows in sections.items() - }, - } - try: - log_path = Path(__file__).resolve().parent.parent / "benchmarks.log" - log_path.parent.mkdir(parents=True, exist_ok=True) - with log_path.open("a", encoding="utf-8") as fh: - fh.write(json.dumps(payload, ensure_ascii=False) + "\n") - except Exception: - pass - - -def shutdown_schedulers() -> None: - try: - BGCache.shutdown(wait=False) - except Exception: - pass +async def _atimer(coro_fn, n: int) -> tuple[float, float]: + for _ in range(WARMUP): + await coro_fn() + t0 = time.perf_counter() + for _ in range(n): + await coro_fn() + elapsed = time.perf_counter() - t0 + return elapsed, n / elapsed + + +def _row(label: str, elapsed: float, ops: float) -> None: + print(f" {label:<42} {ops / 1e6:>6.2f}M ops/s ({elapsed * 1000:.1f} ms total)") # --------------------------------------------------------------------------- -# Scenarios +# Benchmarks # --------------------------------------------------------------------------- -async def scenario_hot_hits() -> List[Stats]: - """Benchmark hot cache hits for all strategies.""" +def bench_inmem_raw(): + """Baseline: raw InMemCache.get() / .set().""" + store = InMemCache() + store.set("k", {"v": 1}, ttl=3600) + elapsed, ops = _timer(lambda: store.get("k"), N) + _row("InMemCache.get() raw", elapsed, ops) - # 1. TTLCache - @TTLCache.cached("bench:ttl:{}", ttl=60) - async def ttl_fn(user_id: int) -> dict: - return await async_io_bound_call(user_id) - # Prime cache - await ttl_fn(1) +def bench_cache_sync_hit(): + """@cache sync hit path (static key).""" - ttl_samples = await _timed_async( - lambda: ttl_fn(1), warmup=CFG.warmup, runs=CFG.runs - ) - ttl_stats = stats_from_samples("TTLCache", "hot hit", CFG.runs, ttl_samples) + @cache(3600, key="bench_sync") + def fn() -> dict: + return {"v": 1} - # 2. 
SWRCache - @SWRCache.cached("bench:swr:{}", ttl=60, stale_ttl=30) - async def swr_fn(user_id: int) -> dict: - return await async_io_bound_call(user_id) + fn() # prime + elapsed, ops = _timer(fn, N) + _row("@cache sync hit (static key)", elapsed, ops) - # Prime cache - await swr_fn(1) - swr_samples = await _timed_async( - lambda: swr_fn(1), warmup=CFG.warmup, runs=CFG.runs - ) - swr_stats = stats_from_samples("SWRCache", "hot hit", CFG.runs, swr_samples) +def bench_cache_sync_keyed(): + """@cache sync with named key template.""" + + @cache(3600, key="bench:{user_id}") + def get_user(user_id: int) -> dict: + return {"id": user_id} + + get_user(1) # prime + elapsed, ops = _timer(lambda: get_user(1), N) + _row("@cache sync hit (named key)", elapsed, ops) + + +def bench_cache_async_hit(): + """@cache async hit path.""" + + @cache(3600, key="bench_async") + async def fn() -> dict: + return {"v": 1} + + async def run(): + await fn() # prime + elapsed, ops = await _atimer(fn, N) + _row("@cache async hit (static key)", elapsed, ops) + + asyncio.run(run()) + + +def bench_cache_swr_hit(): + """@cache SWR path — serve stale, no refresh triggered (stale window).""" + + @cache(0.0001, stale=3600, key="bench_swr") + def fn() -> dict: + return {"v": 1} + + fn() # prime + time.sleep(0.001) # go stale but inside window + elapsed, ops = _timer(fn, N) + _row("@cache SWR stale-serve", elapsed, ops) + + +def bench_cache_miss(): + """@cache sync miss + set (measures miss path overhead).""" + calls = {"n": 0} + + @cache(0, key="bench_miss:{x}") # ttl=0 → always miss + def fn(x: int) -> int: + calls["n"] += 1 + return x + + elapsed, ops = _timer(lambda: fn(1), N) + _row("@cache sync miss (ttl=0)", elapsed, ops) - # 3. 
BGCache - @BGCache.register_loader("bench:bg", interval_seconds=60, run_immediately=True) - async def bg_loader() -> dict: - return await async_io_bound_call(1) - # Wait for load - await asyncio.sleep(0.05) +def bench_chain_cache(): + """ChainCache (L1 InMem + L2 InMem) hit on L1.""" + l1, l2 = InMemCache(), InMemCache() + chain = ChainCache.build(l1, l2, ttls=[60, 3600]) - bg_samples = await _timed_async( - lambda: bg_loader(), warmup=CFG.warmup, runs=CFG.runs + @cache(3600, key="chain_bench", store=chain) + def fn() -> dict: + return {"v": 1} + + fn() # prime + elapsed, ops = _timer(fn, N) + _row("@cache ChainCache L1 hit", elapsed, ops) + + +def bench_bg_read(): + """bg.read() callable (local dict lookup only).""" + store = InMemCache() + store.set("bg_bench", {"v": 1}, ttl=3600) + reader = bg.read("bg_bench", interval=60, store=store) + elapsed, ops = _timer(reader, N) + _row("bg.read() local hit", elapsed, ops) + bg.shutdown() + + +def bench_with_metrics(): + """@cache + InMemoryMetrics overhead.""" + m = InMemoryMetrics() + + @cache(3600, key="bench_metrics", metrics=m) + def fn() -> dict: + return {"v": 1} + + fn() # prime + elapsed, ops = _timer(fn, N) + _row("@cache sync hit + InMemoryMetrics", elapsed, ops) + + +def bench_cache_callable_key(): + """@cache sync with callable key (lambda) vs named template.""" + import hashlib + + @cache(3600, key=lambda user_id: f"bench_callable:{user_id}") + def fn_lambda(user_id: int) -> dict: + return {"id": user_id} + + @cache(3600, key="bench_template:{user_id}") + def fn_template(user_id: int) -> dict: + return {"id": user_id} + + @cache( + 3600, + key=lambda user_id: ( + f"bench_hash:{hashlib.md5(str(user_id).encode()).hexdigest()[:8]}" + ), ) - bg_stats = stats_from_samples("BGCache", "preloaded", CFG.runs, bg_samples) + def fn_hash(user_id: int) -> dict: + return {"id": user_id} + + fn_lambda(1) + fn_template(1) + fn_hash(1) + + elapsed, ops = _timer(lambda: fn_lambda(1), N) + _row("@cache sync hit (callable λ 
key)", elapsed, ops) + + elapsed, ops = _timer(lambda: fn_template(1), N) + _row("@cache sync hit (template key, same data)", elapsed, ops) + + elapsed, ops = _timer(lambda: fn_hash(1), N) + _row("@cache sync hit (callable hash key)", elapsed, ops) + - return [ttl_stats, swr_stats, bg_stats] +def bench_cache_callable_key_async(): + """@cache async with callable key.""" + @cache(3600, key=lambda tenant, uid: f"bench_async_callable:{tenant}:{uid}") + async def fn(tenant: str, uid: int) -> dict: + return {"tenant": tenant, "uid": uid} -async def run_benchmarks() -> Dict[str, List[Stats]]: - return { - "hot_hits": await scenario_hot_hits(), - } + async def run(): + await fn("acme", 1) # prime + elapsed, ops = await _atimer(lambda: fn("acme", 1), N) + _row("@cache async hit (callable λ key)", elapsed, ops) + + asyncio.run(run()) + + +# --------------------------------------------------------------------------- +# Runner +# --------------------------------------------------------------------------- def main() -> None: - status = "ok" - error = None - sections: Dict[str, List[Stats]] = {} - - print("advanced_caching benchmark (Async-only)") - print(f"work_ms={CFG.work_ms} seed={CFG.seed} warmup={CFG.warmup} runs={CFG.runs}") - - try: - sections = asyncio.run(run_benchmarks()) - print_table("Hot Cache Hits", sections["hot_hits"]) - except KeyboardInterrupt: - status = "interrupted" - error = "KeyboardInterrupt" - raise - except Exception as e: - status = "error" - error = f"{type(e).__name__}: {e}" - raise - finally: - shutdown_schedulers() - append_json_log(status=status, error=error, sections=sections) + print(f"\n{'=' * 65}") + print(f" advanced-caching benchmark · N={N:,} iterations per test") + print(f"{'=' * 65}") + + suites = [ + ("Storage baseline", [bench_inmem_raw]), + ( + "@cache decorator", + [ + bench_cache_sync_hit, + bench_cache_sync_keyed, + bench_cache_async_hit, + bench_cache_swr_hit, + bench_cache_miss, + ], + ), + ( + "Callable keys", + [ + 
bench_cache_callable_key, + bench_cache_callable_key_async, + ], + ), + ("Multi-level", [bench_chain_cache]), + ("bg writer/reader", [bench_bg_read]), + ("With metrics", [bench_with_metrics]), + ] + + for section, fns in suites: + print(f"\n ── {section}") + for fn in fns: + fn() + + print(f"\n{'=' * 65}\n") if __name__ == "__main__": diff --git a/tests/profile_decorators.py b/tests/profile_decorators.py index 86340e7..2b0d2bb 100644 --- a/tests/profile_decorators.py +++ b/tests/profile_decorators.py @@ -1,71 +1,102 @@ +""" +Profiler-friendly workload for advanced-caching. + +Designed for use with Scalene, cProfile, or py-spy: + + uv run scalene tests/profile_decorators.py + uv run python -m cProfile -s cumulative tests/profile_decorators.py + py-spy record -o profile.svg -- python tests/profile_decorators.py + +Environment: + PROFILE_N number of iterations (default: 2_000_000) + PROFILE_ASYNC set to "1" to also run async workload +""" + from __future__ import annotations +import asyncio import os -import sys import time -from pathlib import Path -_REPO_ROOT = Path(__file__).resolve().parents[1] -_SRC_DIR = _REPO_ROOT / "src" -if _SRC_DIR.exists() and str(_SRC_DIR) not in sys.path: - sys.path.insert(0, str(_SRC_DIR)) +from advanced_caching import cache, bg, InMemCache -from advanced_caching import BGCache, SWRCache, TTLCache +N = int(os.getenv("PROFILE_N", "2_000_000")) +RUN_ASYNC = os.getenv("PROFILE_ASYNC", "0") == "1" -def _env_int(name: str, default: int) -> int: - raw = os.getenv(name) - if raw is None or raw == "": - return default - return int(raw) +# ── Workloads ──────────────────────────────────────────────────────────────── -def main() -> None: - # This script is designed for profilers (e.g., Scalene): - # - No per-iteration timing - # - Minimal I/O / printing - n = _env_int("PROFILE_N", 2_000_000) +@cache(3600, key="prof_static") +def static_key() -> int: + return 1 - def cheap_work(x: int) -> int: - return x + 1 - @TTLCache.cached("ttl:{}", ttl=60) - 
def ttl_fn(x: int) -> int: - return cheap_work(x) +@cache(3600, key="prof:{x}") +def named_key(x: int) -> int: + return x - @SWRCache.cached("swr:{}", ttl=60, stale_ttl=30) - def swr_fn(x: int) -> int: - return cheap_work(x) - @BGCache.register_loader("bg", interval_seconds=60) - def bg_loader() -> int: - return cheap_work(1) +@cache(0.0001, stale=3600, key="prof_swr") +def swr_stale() -> int: + return 1 - # Warm caches. - ttl_fn(1) - swr_fn(1) - bg_loader() - # Hot-path loops. - for _ in range(n): - ttl_fn(1) - for _ in range(n): - swr_fn(1) - for _ in range(n): - bg_loader() +@cache(3600, key="prof_async") +async def async_hit() -> int: + return 1 + + +def run_sync() -> None: + # Prime caches + static_key() + named_key(1) + swr_stale() + time.sleep(0.001) # go stale but inside SWR window - # Miss-path loops (smaller: avoid excessive memory growth). - miss_n = max(10_000, n // 100) - for i in range(miss_n): - ttl_fn(i) - for i in range(miss_n): - swr_fn(i) + print(f"Running {N:,} sync iterations...") + t0 = time.perf_counter() - # Give any background work a moment to settle. - time.sleep(0.05) + for _ in range(N): + static_key() - # Stop background scheduler thread to avoid profiler noise. 
- BGCache.shutdown(wait=False) + t1 = time.perf_counter() + for _ in range(N): + named_key(1) + + t2 = time.perf_counter() + for _ in range(N): + swr_stale() + + t3 = time.perf_counter() + + print( + f" static_key : {(t1 - t0) * 1000:.0f} ms ({N / (t1 - t0) / 1e6:.2f}M ops/s)" + ) + print( + f" named_key : {(t2 - t1) * 1000:.0f} ms ({N / (t2 - t1) / 1e6:.2f}M ops/s)" + ) + print( + f" swr_stale : {(t3 - t2) * 1000:.0f} ms ({N / (t3 - t2) / 1e6:.2f}M ops/s)" + ) + + +async def run_async() -> None: + await async_hit() # prime + n = N // 10 # async is slower; use fewer iterations + + print(f"\nRunning {n:,} async iterations...") + t0 = time.perf_counter() + for _ in range(n): + await async_hit() + elapsed = time.perf_counter() - t0 + print(f" async_hit : {elapsed * 1000:.0f} ms ({n / elapsed / 1e6:.2f}M ops/s)") + + +def main() -> None: + run_sync() + if RUN_ASYNC: + asyncio.run(run_async()) if __name__ == "__main__": diff --git a/tests/test_bg_writer_reader.py b/tests/test_bg_writer_reader.py index a4aa801..e0b00e2 100644 --- a/tests/test_bg_writer_reader.py +++ b/tests/test_bg_writer_reader.py @@ -2,7 +2,7 @@ import time import pytest -from advanced_caching import BGCache, InMemCache +from advanced_caching import bg, InMemCache, InMemoryMetrics @pytest.mark.asyncio @@ -11,28 +11,16 @@ async def test_single_writer_multi_reader_async_with_fallback(): shared_cache = InMemCache() - @BGCache.register_writer( - "shared", interval_seconds=0.01, run_immediately=True, cache=shared_cache - ) + @bg.write(0.01, key="shared", run_immediately=True, store=shared_cache) async def writer(): calls["n"] += 1 return {"value": calls["n"]} - reader_cache = shared_cache - - reader_a = BGCache.get_reader( - "shared", - interval_seconds=0.01, - ttl=None, - run_immediately=True, - cache=reader_cache, + reader_a = bg.read( + "shared", interval=0.01, ttl=None, run_immediately=True, store=shared_cache ) - reader_b = BGCache.get_reader( - "shared", - interval_seconds=0.01, - ttl=None, - 
run_immediately=True, - cache=reader_cache, + reader_b = bg.read( + "shared", interval=0.01, ttl=None, run_immediately=True, store=shared_cache ) async def wait_for_value(reader, timeout=0.2): @@ -53,62 +41,48 @@ async def wait_for_value(reader, timeout=0.2): v3 = await wait_for_value(reader_a) assert v3 is not None and v3.get("value", 0) >= v1.get("value", 0) - BGCache.shutdown() + bg.shutdown() @pytest.mark.asyncio async def test_reader_without_fallback_returns_none(): - reader = BGCache.get_reader( - "missing", interval_seconds=0, ttl=0, run_immediately=False - ) + reader = bg.read("missing_key_xyz", interval=0, ttl=0, run_immediately=False) assert reader() is None - BGCache.shutdown() + bg.shutdown() def test_single_writer_enforced_sync(): - @BGCache.register_writer( - "only_one", interval_seconds=0.01, run_immediately=False, cache=InMemCache() - ) + @bg.write(0.01, key="only_one", run_immediately=False, store=InMemCache()) def writer(): return 1 with pytest.raises(ValueError): - @BGCache.register_writer("only_one", interval_seconds=0.01) + @bg.write(0.01, key="only_one") def writer2(): return 2 - BGCache.shutdown() + bg.shutdown() @pytest.mark.asyncio async def test_sync_writer_async_reader_fallback_runs_in_executor(): calls = {"n": 0} - shared_cache = InMemCache() - @BGCache.register_writer( - "mix", interval_seconds=0.01, ttl=1, run_immediately=False, cache=shared_cache - ) + @bg.write(0.01, key="mix", ttl=1, run_immediately=False, store=shared_cache) def writer_sync(): calls["n"] += 1 return calls["n"] - reader_async = BGCache.get_reader( - "mix", - interval_seconds=0.01, - ttl=1, - run_immediately=False, - cache=shared_cache, + reader_async = bg.read( + "mix", interval=0.01, ttl=1, run_immediately=False, store=shared_cache ) - # First call triggers load_once pull from source cache (which is empty at start) assert reader_async() is None - # Populate source via writer _ = writer_sync() await asyncio.sleep(0.05) - # Reader should eventually see the value 
after writer populates source cache. async def wait_for_value(reader, timeout=0.5): start = asyncio.get_event_loop().time() while asyncio.get_event_loop().time() - start < timeout: @@ -119,8 +93,7 @@ async def wait_for_value(reader, timeout=0.5): return None assert await wait_for_value(reader_async) is not None - - BGCache.shutdown() + bg.shutdown() @pytest.mark.asyncio @@ -128,22 +101,13 @@ async def test_e2e_async_writer_reader_background_refresh(): shared_cache = InMemCache() calls = {"n": 0} - @BGCache.register_writer( - "bg_async", - interval_seconds=0.05, - run_immediately=True, - cache=shared_cache, - ) + @bg.write(0.05, key="bg_async", run_immediately=True, store=shared_cache) async def writer_async(): calls["n"] += 1 return {"count": calls["n"]} - reader = BGCache.get_reader( - "bg_async", - interval_seconds=0.05, - ttl=None, - run_immediately=True, - cache=shared_cache, + reader = bg.read( + "bg_async", interval=0.05, ttl=None, run_immediately=True, store=shared_cache ) async def wait_for_value(reader, min_count, timeout=0.5): @@ -157,33 +121,23 @@ async def wait_for_value(reader, min_count, timeout=0.5): first = await wait_for_value(reader, 1) assert first is not None and first.get("count", 0) >= 1 - updated = await wait_for_value(reader, 2) assert updated is not None and updated.get("count", 0) >= 2 - BGCache.shutdown() + bg.shutdown() def test_e2e_sync_writer_reader_background_refresh(): shared_cache = InMemCache() calls = {"n": 0} - @BGCache.register_writer( - "bg_sync", - interval_seconds=0.05, - run_immediately=True, - cache=shared_cache, - ) + @bg.write(0.05, key="bg_sync", run_immediately=True, store=shared_cache) def writer_sync(): calls["n"] += 1 return {"count": calls["n"]} - reader = BGCache.get_reader( - "bg_sync", - interval_seconds=0.05, - ttl=None, - run_immediately=True, - cache=shared_cache, + reader = bg.read( + "bg_sync", interval=0.05, ttl=None, run_immediately=True, store=shared_cache ) def wait_for_value(reader_fn, min_count, 
timeout=0.5): @@ -197,6 +151,75 @@ def wait_for_value(reader_fn, min_count, timeout=0.5): first = wait_for_value(reader, 1) assert first is not None and first.get("count", 0) >= 1 - updated = wait_for_value(reader, 2) assert updated is not None and updated.get("count", 0) >= 2 + + +def test_multiple_readers_independent_local_caches(): + """Each bg.read() call creates an independent local mirror cache.""" + shared = InMemCache() + shared.set("ikey", {"v": 1}, ttl=3600) + + r1 = bg.read("ikey", interval=1, store=shared) + r2 = bg.read("ikey", interval=1, store=shared) + + assert r1.store is not r2.store, "each reader must have its own local cache" + assert r1() == r2() == {"v": 1} + + bg.shutdown() + + +def test_reader_auto_discovers_writer_store(): + """bg.read(key) with store=None uses the writer's store automatically.""" + writer_store = InMemCache() + writer_store.set("autodisco_key", {"payload": "hello"}, ttl=3600) + + @bg.write(60, key="autodisco_key", store=writer_store, run_immediately=False) + def noop_writer(): + return {} + + reader = bg.read("autodisco_key") # no store= → auto-discover + assert reader() == {"payload": "hello"} + + bg.shutdown() + + +def test_writer_metrics_record_background_refresh(): + """bg.write with metrics= tracks successful background refreshes.""" + metrics = InMemoryMetrics() + store = InMemCache() + calls = {"n": 0} + + @bg.write( + 0.05, key="metered_writer", store=store, metrics=metrics, run_immediately=True + ) + def refresh(): + calls["n"] += 1 + return {"count": calls["n"]} + + time.sleep(0.15) + + stats = metrics.get_stats() + bg_stats = stats.get("background_refresh", {}) + assert "metered_writer" in bg_stats + assert bg_stats["metered_writer"]["success"] >= 1 + + bg.shutdown() + + +def test_writer_metrics_on_error(): + """bg.write with metrics= tracks failed refreshes.""" + metrics = InMemoryMetrics() + + @bg.write(0.05, key="failing_writer", metrics=metrics, run_immediately=True) + def bad_writer(): + raise 
RuntimeError("intentional") + + time.sleep(0.15) + + stats = metrics.get_stats() + bg_stats = stats.get("background_refresh", {}) + assert "failing_writer" in bg_stats + assert bg_stats["failing_writer"]["failure"] >= 1 + + bg.shutdown() diff --git a/tests/test_configured_cache.py b/tests/test_configured_cache.py index 0a77342..033fe7e 100644 --- a/tests/test_configured_cache.py +++ b/tests/test_configured_cache.py @@ -1,15 +1,13 @@ import pytest import time -from advanced_caching import TTLCache, SWRCache, BGCache, InMemCache +from advanced_caching import cache, bg, InMemCache def test_configured_ttl_cache(): - cache = InMemCache() - MyTTL = TTLCache.configure(cache=cache) - + custom_cache = InMemCache() call_count = 0 - @MyTTL.cached("key", ttl=60) + @cache(60, key="key", store=custom_cache) def func(): nonlocal call_count call_count += 1 @@ -17,7 +15,7 @@ def func(): assert func() == 1 assert call_count == 1 - assert cache.exists("key") + assert custom_cache.exists("key") # Should hit cache assert func() == 1 @@ -25,12 +23,10 @@ def func(): def test_configured_swr_cache(): - cache = InMemCache() - MySWR = SWRCache.configure(cache=cache) - + custom_cache = InMemCache() call_count = 0 - @MySWR.cached("swr", ttl=60) + @cache(60, key="swr", store=custom_cache) def func(): nonlocal call_count call_count += 1 @@ -38,7 +34,7 @@ def func(): assert func() == 2 assert call_count == 1 - assert cache.exists("swr") + assert custom_cache.exists("swr") # Should hit cache assert func() == 2 @@ -46,12 +42,10 @@ def func(): def test_configured_bg_cache(): - cache = InMemCache() - MyBG = BGCache.configure(cache=cache) - + custom_cache = InMemCache() call_count = 0 - @MyBG.register_loader("bg", interval_seconds=60, run_immediately=True) + @bg(60, key="bg", run_immediately=True, store=custom_cache) def func(): nonlocal call_count call_count += 1 @@ -62,4 +56,4 @@ def func(): assert func() == 3 assert call_count >= 1 - assert cache.exists("bg") + assert custom_cache.exists("bg") diff 
--git a/tests/test_correctness.py b/tests/test_correctness.py index b6eeeec..031e1dc 100644 --- a/tests/test_correctness.py +++ b/tests/test_correctness.py @@ -9,10 +9,9 @@ import time from advanced_caching import ( - BGCache, + bg, + cache, InMemCache, - TTLCache, - SWRCache, HybridCache, validate_cache_storage, ) @@ -23,7 +22,7 @@ async def cleanup(): """Clean up scheduler between tests.""" yield try: - BGCache.shutdown(wait=False) + bg.shutdown(wait=False) except: pass await asyncio.sleep(0.05) @@ -38,7 +37,7 @@ async def test_basic_caching(self): """Test basic TTL caching with function calls.""" call_count = {"count": 0} - @TTLCache.cached("user:{}", ttl=60) + @cache(60, key="user:{}") async def get_user(user_id): call_count["count"] += 1 return {"id": user_id, "name": f"User{user_id}"} @@ -62,7 +61,7 @@ async def test_ttl_expiration(self): """Test that cache expires after TTL.""" call_count = {"count": 0} - @TTLCache.cached("data:{}", ttl=0.2) + @cache(0.2, key="data:{}") async def get_data(key): call_count["count"] += 1 return {"key": key, "count": call_count["count"]} @@ -89,7 +88,7 @@ async def test_custom_cache_backend(self): """Test TTLCache with custom backend.""" custom_cache = InMemCache() - @TTLCache.cached("item:{}", ttl=60, cache=custom_cache) + @cache(60, key="item:{}", store=custom_cache) async def get_item(item_id): return {"id": item_id} @@ -102,7 +101,7 @@ async def get_item(item_id): async def test_callable_key_function(self): """Test TTLCache with callable key function.""" - @TTLCache.cached(key=lambda user_id: f"user:{user_id}", ttl=60) + @cache(60, key=lambda user_id: f"user:{user_id}") async def get_user(user_id): return {"id": user_id} @@ -112,16 +111,16 @@ async def get_user(user_id): async def test_isolated_caches(self): """Test that each TTL cached function has its own cache.""" - @TTLCache.cached("user:{}", ttl=60) + @cache(60, key="user:{}") async def get_user(user_id): return {"type": "user", "id": user_id} - 
@TTLCache.cached("product:{}", ttl=60) + @cache(60, key="product:{}") async def get_product(product_id): return {"type": "product", "id": product_id} # Each should have its own cache - assert get_user._cache is not get_product._cache + assert get_user.store is not get_product.store # Both should work assert (await get_user(1))["type"] == "user" @@ -136,7 +135,7 @@ async def test_fresh_cache_hit(self): """Test SWR with fresh cache returns immediately.""" call_count = {"count": 0} - @SWRCache.cached("user:{}", ttl=60, stale_ttl=30) + @cache(60, stale=30, key="user:{}") async def get_user(user_id): call_count["count"] += 1 return {"id": user_id, "count": call_count["count"]} @@ -155,7 +154,7 @@ async def test_stale_with_background_refresh(self): """Test SWR serves stale data while refreshing in background.""" call_count = {"count": 0} - @SWRCache.cached("data:{}", ttl=0.2, stale_ttl=0.5) + @cache(0.2, stale=0.5, key="data:{}") async def get_data(key): call_count["count"] += 1 return {"key": key, "count": call_count["count"]} @@ -183,7 +182,7 @@ async def test_too_stale_refetch(self): """Test SWR refetches when too stale.""" call_count = {"count": 0} - @SWRCache.cached("data:{}", ttl=0.1, stale_ttl=0.1) + @cache(0.1, stale=0.1, key="data:{}") async def get_data(key): call_count["count"] += 1 return {"key": key, "count": call_count["count"]} @@ -204,7 +203,7 @@ async def test_custom_cache_backend(self): """Test SWRCache with custom backend.""" custom_cache = InMemCache() - @SWRCache.cached("item:{}", ttl=60, stale_ttl=30, cache=custom_cache) + @cache(60, stale=30, key="item:{}", store=custom_cache) async def get_item(item_id): return {"id": item_id} @@ -221,9 +220,7 @@ async def test_async_loader_immediate(self): """Test async loader with immediate execution.""" call_count = {"count": 0} - @BGCache.register_loader( - "async_test", interval_seconds=10, run_immediately=True - ) + @bg(10, key="async_test", run_immediately=True) async def load_data(): call_count["count"] += 
1 return {"value": call_count["count"]} @@ -244,9 +241,7 @@ async def test_sync_loader_no_immediate(self): """Test sync loader without immediate execution.""" call_count = {"count": 0} - @BGCache.register_loader( - "no_immediate", interval_seconds=10, run_immediately=False - ) + @bg(10, key="no_immediate", run_immediately=False) def load_data(): call_count["count"] += 1 return {"value": call_count["count"]} @@ -265,9 +260,7 @@ async def test_custom_cache_backend(self): """Test BGCache using custom cache backend.""" custom_cache = InMemCache() - @BGCache.register_loader( - "custom", interval_seconds=10, run_immediately=True, cache=custom_cache - ) + @bg(10, key="custom", run_immediately=True, store=custom_cache) async def load_data(): return {"custom": True} @@ -284,20 +277,18 @@ async def load_data(): async def test_isolated_cache_instances(self): """Test that each loader has its own cache.""" - @BGCache.register_loader("loader1", interval_seconds=10, run_immediately=True) + @bg(10, key="loader1", run_immediately=True) async def load1(): return {"id": 1} - @BGCache.register_loader("loader2", interval_seconds=10, run_immediately=True) + @bg(10, key="loader2", run_immediately=True) async def load2(): return {"id": 2} await asyncio.sleep(0.1) # Each should have its own cache - assert load1._cache is not load2._cache - assert load1._cache_key == "loader1" - assert load2._cache_key == "loader2" + assert load1.store is not load2.store # Each should have correct data assert (await load1()) == {"id": 1} @@ -312,12 +303,7 @@ def error_handler(e): errors.append(e) error_event.set() - @BGCache.register_loader( - "error_test", - interval_seconds=10, - run_immediately=True, - on_error=error_handler, - ) + @bg(10, key="error_test", run_immediately=True, on_error=error_handler) async def load_data(): raise ValueError("Test error") @@ -336,7 +322,7 @@ async def test_periodic_refresh(self): call_count = {"count": 0} load_event = asyncio.Event() - 
@BGCache.register_loader("periodic", interval_seconds=0.1, run_immediately=True) + @bg(0.1, key="periodic", run_immediately=True) async def load_data(): call_count["count"] += 1 load_event.set() @@ -367,15 +353,15 @@ async def load_data(): async def test_multiple_loaders(self): """Test multiple loaders can coexist.""" - @BGCache.register_loader("loader_a", interval_seconds=10, run_immediately=True) + @bg(10, key="loader_a", run_immediately=True) async def load_a(): return {"name": "a"} - @BGCache.register_loader("loader_b", interval_seconds=10, run_immediately=True) + @bg(10, key="loader_b", run_immediately=True) async def load_b(): return {"name": "b"} - @BGCache.register_loader("loader_c", interval_seconds=10, run_immediately=True) + @bg(10, key="loader_c", run_immediately=True) async def load_c(): return {"name": "c"} @@ -390,11 +376,11 @@ async def test_lambda_cache_factory(self): """Test BGCache with lambda returning HybridCache.""" call_count = {"count": 0} - @BGCache.register_loader( - "test_lambda_cache", - interval_seconds=3600, + @bg( + 3600, + key="test_lambda_cache", run_immediately=True, - cache=lambda: HybridCache( + store=lambda: HybridCache( l1_cache=InMemCache(), l2_cache=InMemCache(), l1_ttl=60 ), ) @@ -413,9 +399,9 @@ async def get_test_data() -> dict[str, str]: assert call_count["count"] == 1 # No additional call # Verify cache object was created correctly - assert hasattr(get_test_data, "_cache") - assert get_test_data._cache is not None - assert isinstance(get_test_data._cache, HybridCache) + assert hasattr(get_test_data, "store") + assert get_test_data.store is not None + assert isinstance(get_test_data.store, HybridCache) @pytest.mark.asyncio @@ -426,7 +412,7 @@ class TestCachePerformance: async def test_cache_hit_speed(self): """Test that cache hits are fast.""" - @BGCache.register_loader("perf_test", interval_seconds=10, run_immediately=True) + @bg(10, key="perf_test", run_immediately=True) async def load_data(): await asyncio.sleep(0.01) 
# Simulate slow operation return {"data": "value"} @@ -447,7 +433,7 @@ async def load_data(): async def test_ttl_cache_hit_speed(self): """Test TTLCache hit speed.""" - @TTLCache.cached("item:{}", ttl=60) + @cache(60, key="item:{}") async def get_item(item_id): await asyncio.sleep(0.001) # Simulate work return {"id": item_id} @@ -472,7 +458,7 @@ class TestKeyTemplates: async def test_ttl_positional_template(self): calls = {"n": 0} - @TTLCache.cached("user:{}", ttl=60) + @cache(60, key="user:{}") async def get_user(user_id: int): calls["n"] += 1 return {"id": user_id} @@ -484,7 +470,7 @@ async def get_user(user_id: int): async def test_ttl_named_template(self): calls = {"n": 0} - @TTLCache.cached("user:{user_id}", ttl=60) + @cache(60, key="user:{user_id}") async def get_user(*, user_id: int): calls["n"] += 1 return {"id": user_id} @@ -496,10 +482,10 @@ async def get_user(*, user_id: int): async def test_swr_default_arg_with_key_function(self): calls = {"n": 0} - @SWRCache.cached( + @cache( + 5, + stale=10, key=lambda *a, **k: f"i18n:all:{k.get('lang', a[0] if a else 'en')}", - ttl=5, - stale_ttl=10, ) async def load_all(lang: str = "en") -> dict: calls["n"] += 1 @@ -517,7 +503,7 @@ async def load_all(lang: str = "en") -> dict: async def test_swr_named_template_with_kwargs(self): calls = {"n": 0} - @SWRCache.cached("i18n:{lang}", ttl=5, stale_ttl=10) + @cache(5, stale=10, key="i18n:{lang}") async def load_i18n(*, lang: str = "en") -> dict: calls["n"] += 1 return {"hello": f"Hello in {lang}"} @@ -531,7 +517,7 @@ async def load_i18n(*, lang: str = "en") -> dict: async def test_swr_positional_template_with_args(self): calls = {"n": 0} - @SWRCache.cached("i18n:{}", ttl=5, stale_ttl=10) + @cache(5, stale=10, key="i18n:{}") async def load_i18n(lang: str) -> dict: calls["n"] += 1 return {"hello": f"Hello in {lang}"} @@ -545,7 +531,7 @@ async def load_i18n(lang: str) -> dict: async def test_swr_named_template_with_extra_kwargs(self): calls = {"n": 0} - 
@SWRCache.cached("i18n:{lang}", ttl=5, stale_ttl=10) + @cache(5, stale=10, key="i18n:{lang}") async def load_i18n(lang: str, region: str | None = None) -> dict: calls["n"] += 1 suffix = f"-{region}" if region else "" @@ -561,7 +547,7 @@ async def test_ttl_named_template_with_positional_arg(self): """Test named placeholder with positional argument in TTLCache.""" calls = {"n": 0} - @TTLCache.cached("user:{user_id}", ttl=60) + @cache(60, key="user:{user_id}") async def get_user(user_id: int): calls["n"] += 1 return {"id": user_id} @@ -579,7 +565,7 @@ async def test_swr_named_template_with_positional_arg(self): """Test named placeholder with positional argument in SWRCache.""" calls = {"n": 0} - @SWRCache.cached("item:{item_id}", ttl=60) + @cache(60, key="item:{item_id}") async def get_item(item_id: int): calls["n"] += 1 return {"id": item_id} @@ -593,7 +579,7 @@ async def test_multiple_named_placeholders_mixed_args(self): """Test multiple named placeholders with mixed positional and keyword args.""" calls = {"n": 0} - @TTLCache.cached("u:{uid}:g:{gid}", ttl=60) + @cache(60, key="u:{uid}:g:{gid}") async def get_data(uid: int, gid: int): calls["n"] += 1 return f"{uid}-{gid}" @@ -670,7 +656,7 @@ class TestDecoratorKeyEdgeCases: async def test_ttl_key_without_placeholders(self): calls = {"n": 0} - @TTLCache.cached("static-key", ttl=60) + @cache(60, key="static-key") async def f(user_id: int): calls["n"] += 1 return user_id @@ -682,7 +668,7 @@ async def f(user_id: int): async def test_swr_key_without_args_or_kwargs(self): calls = {"n": 0} - @SWRCache.cached("static", ttl=1, stale_ttl=1) + @cache(1, stale=1, key="static") async def f() -> int: calls["n"] += 1 return calls["n"] @@ -697,7 +683,7 @@ async def test_swr_key_template_single_kwarg_positional_fallback(self): calls = {"n": 0} # Template with positional placeholder but only kwarg passed - @SWRCache.cached("foo:{}", ttl=1, stale_ttl=1) + @cache(1, stale=1, key="foo:{}") async def f(*, x: int) -> int: calls["n"] += 1 
return x @@ -710,7 +696,7 @@ async def test_swr_invalid_format_falls_back_to_raw_key(self): calls = {"n": 0} # Template expects named field that is never provided; we only pass kwargs - @SWRCache.cached("foo:{missing}", ttl=1, stale_ttl=1) + @cache(1, stale=1, key="foo:{missing}") async def f(*, x: int) -> int: calls["n"] += 1 return x @@ -851,7 +837,7 @@ class TestNoCachingWhenZero: async def test_ttlcache_ttl_zero_disables_caching(self): calls = {"n": 0} - @TTLCache.cached("user:{}", ttl=0) + @cache(0, key="user:{}") async def get_user(user_id: int) -> int: calls["n"] += 1 return calls["n"] @@ -865,7 +851,7 @@ async def get_user(user_id: int) -> int: async def test_swrcache_ttl_zero_disables_caching(self): calls = {"n": 0} - @SWRCache.cached("data:{}", ttl=0, stale_ttl=10) + @cache(0, stale=10, key="data:{}") async def get_data(key: str) -> int: calls["n"] += 1 return calls["n"] @@ -879,7 +865,7 @@ async def get_data(key: str) -> int: async def test_bgcache_interval_zero_disables_background_and_cache(self): calls = {"n": 0} - @BGCache.register_loader(key="no_bg", interval_seconds=0, ttl=None) + @bg(0, key="no_bg", ttl=None) async def load_data() -> int: calls["n"] += 1 return calls["n"] @@ -893,7 +879,7 @@ async def load_data() -> int: async def test_bgcache_ttl_zero_disables_background_and_cache(self): calls = {"n": 0} - @BGCache.register_loader(key="no_bg_ttl", interval_seconds=10, ttl=0) + @bg(10, key="no_bg_ttl", ttl=0) async def load_data() -> int: calls["n"] += 1 return calls["n"] diff --git a/tests/test_gcs_cache_integration.py b/tests/test_gcs_cache_integration.py index f05e377..b98bc4c 100644 --- a/tests/test_gcs_cache_integration.py +++ b/tests/test_gcs_cache_integration.py @@ -4,10 +4,12 @@ try: from google.cloud import storage + import requests as _requests except ImportError: # pragma: no cover storage = None + _requests = None # type: ignore -from advanced_caching import GCSCache, TTLCache, SWRCache, ChainCache, InMemCache +from advanced_caching 
import cache, GCSCache, ChainCache, InMemCache EMULATOR = os.getenv("STORAGE_EMULATOR_HOST") or "http://localhost:4443" USE_EMULATOR = bool(EMULATOR) @@ -17,6 +19,22 @@ ) +@pytest.fixture(autouse=True, scope="module") +def require_gcs_emulator(): + if USE_EMULATOR and not _emulator_available(): + pytest.skip(f"GCS emulator not reachable at {EMULATOR}") + + +def _emulator_available() -> bool: + if _requests is None: + return False + try: + _requests.get(EMULATOR, timeout=2) + return True + except Exception: + return False + + def _client(): if storage is None: return None @@ -37,19 +55,19 @@ def test_gcscache_set_get_and_dedupe(): except Exception: pass - cache = GCSCache( + store = GCSCache( bucket="test-bkt", prefix="t/", client=client, serializer="json", dedupe_writes=True, ) - cache.set("k1", {"v": 1}, ttl=0) - assert cache.get("k1") == {"v": 1} + store.set("k1", {"v": 1}, ttl=0) + assert store.get("k1") == {"v": 1} - cache.set("k1", {"v": 1}, ttl=0) # dedupe should skip rewrite - cache.set("k1", {"v": 2}, ttl=0) - assert cache.get("k1") == {"v": 2} + store.set("k1", {"v": 1}, ttl=0) # dedupe should skip rewrite + store.set("k1", {"v": 2}, ttl=0) + assert store.get("k1") == {"v": 2} @pytest.mark.integration @@ -63,7 +81,7 @@ def test_ttlcache_with_gcscache_decorator(): except Exception: pass - cache = GCSCache( + store = GCSCache( bucket="test-bkt2", prefix="u/", client=client, @@ -74,7 +92,7 @@ def test_ttlcache_with_gcscache_decorator(): calls = {"n": 0} - @TTLCache.cached("user:{user_id}", ttl=0.2, cache=cache) + @cache(0.2, key="user:{user_id}", store=store) def fetch_user(user_id: int): calls["n"] += 1 return {"id": user_id, "n": calls["n"]} @@ -85,7 +103,7 @@ def fetch_user(user_id: int): time.sleep(1.0) # Force delete to ensure cache miss if TTL/time drift is an issue - cache.delete("user:1") + store.delete("user:1") third = fetch_user(1) # TTL expired, should recompute assert third["n"] >= 2 @@ -100,13 +118,13 @@ def 
test_swrcache_with_gcscache_decorator(): client.create_bucket(bucket) except Exception: pass - cache = GCSCache( + store = GCSCache( bucket="test-bkt-swr", prefix="swr/", client=client, serializer="json" ) calls = {"n": 0} - @SWRCache.cached("data:{id}", ttl=0.5, stale_ttl=1.0, cache=cache) + @cache(0.5, stale=1.0, key="data:{id}", store=store) def fetch_data(id: int): calls["n"] += 1 return {"id": id, "n": calls["n"]} @@ -119,7 +137,7 @@ def fetch_data(id: int): v2 = fetch_data(1) assert v2["n"] == 1 - # 3. Wait for TTL to expire but within stale_ttl + # 3. Wait for TTL to expire but within stale window time.sleep(0.6) # Should return stale value immediately, trigger background refresh v3 = fetch_data(1) diff --git a/tests/test_integration_redis.py b/tests/test_integration_redis.py index 55bcc92..90b1e74 100644 --- a/tests/test_integration_redis.py +++ b/tests/test_integration_redis.py @@ -18,10 +18,9 @@ HAS_REDIS = False from advanced_caching import ( + cache, + bg, CacheEntry, - TTLCache, - SWRCache, - BGCache, RedisCache, HybridCache, InMemCache, @@ -32,7 +31,7 @@ @pytest.fixture(autouse=True) async def reset_scheduler(): yield - BGCache.shutdown(wait=False) + bg.shutdown(wait=False) @pytest.fixture(scope="module") @@ -41,8 +40,12 @@ def redis_container(): if not HAS_REDIS: pytest.skip("testcontainers[redis] not installed") - container = RedisContainer(image="redis:7-alpine") - container.start() + try: + container = RedisContainer(image="redis:7-alpine") + container.start() + except Exception: + pytest.skip("Docker not available — skipping Redis integration tests") + yield container container.stop() @@ -188,14 +191,14 @@ def loads(data: bytes) -> Any: @pytest.mark.asyncio class TestTTLCacheWithRedis: - """Test TTLCache decorator with Redis backend.""" + """Test cache() TTL decorator with Redis backend.""" async def test_ttlcache_redis_basic(self, redis_client): - """Test TTLCache with Redis backend.""" + """Test cache() with Redis backend.""" calls = {"n": 0} - 
cache = RedisCache(redis_client, prefix="ttl:") + store = RedisCache(redis_client, prefix="ttl:") - @TTLCache.cached("user:{}", ttl=60, cache=cache) + @cache(60, key="user:{}", store=store) async def get_user(user_id: int): calls["n"] += 1 return {"id": user_id, "name": f"User{user_id}"} @@ -213,11 +216,11 @@ async def get_user(user_id: int): assert calls["n"] == 2 async def test_ttlcache_redis_expiration(self, redis_client): - """Test TTLCache with Redis respects TTL.""" + """Test cache() with Redis respects TTL.""" calls = {"n": 0} - cache = RedisCache(redis_client, prefix="ttl:") + store = RedisCache(redis_client, prefix="ttl:") - @TTLCache.cached("data:{}", ttl=1, cache=cache) + @cache(1, key="data:{}", store=store) async def get_data(key: str): calls["n"] += 1 return f"data_{key}" @@ -236,11 +239,11 @@ async def get_data(key: str): assert calls["n"] == 2 async def test_ttlcache_redis_named_template(self, redis_client): - """Test TTLCache with Redis using named key template.""" + """Test cache() with Redis using named key template.""" calls = {"n": 0} - cache = RedisCache(redis_client, prefix="ttl:") + store = RedisCache(redis_client, prefix="ttl:") - @TTLCache.cached("product:{product_id}", ttl=60, cache=cache) + @cache(60, key="product:{product_id}", store=store) async def get_product(*, product_id: int): calls["n"] += 1 return {"id": product_id, "name": f"Product{product_id}"} @@ -255,14 +258,14 @@ async def get_product(*, product_id: int): @pytest.mark.asyncio class TestSWRCacheWithRedis: - """Test SWRCache with Redis backend.""" + """Test cache() SWR with Redis backend.""" async def test_swrcache_redis_basic(self, redis_client): - """Test SWRCache with Redis backend.""" + """Test SWR cache with Redis backend.""" calls = {"n": 0} - cache = RedisCache(redis_client, prefix="swr:") + store = RedisCache(redis_client, prefix="swr:") - @SWRCache.cached("product:{}", ttl=1, stale_ttl=1, cache=cache) + @cache(1, stale=1, key="product:{}", store=store) async def 
get_product(product_id: int): calls["n"] += 1 return {"id": product_id, "count": calls["n"]} @@ -276,11 +279,11 @@ async def get_product(product_id: int): assert calls["n"] == 1 async def test_swrcache_redis_stale_serve(self, redis_client): - """Test SWRCache serves stale data while refreshing.""" + """Test SWR serves stale data while refreshing.""" calls = {"n": 0} - cache = RedisCache(redis_client, prefix="swr:") + store = RedisCache(redis_client, prefix="swr:") - @SWRCache.cached("data:{}", ttl=0.3, stale_ttl=0.5, cache=cache) + @cache(0.3, stale=0.5, key="data:{}", store=store) async def get_data(key: str): calls["n"] += 1 return {"key": key, "count": calls["n"]} @@ -302,19 +305,14 @@ async def get_data(key: str): @pytest.mark.asyncio class TestBGCacheWithRedis: - """Test BGCache with Redis backend.""" + """Test bg() with Redis backend.""" async def test_bgcache_redis_sync_loader(self, redis_client): - """Test BGCache with sync loader and Redis backend.""" + """Test bg with async loader and Redis backend.""" calls = {"n": 0} - cache = RedisCache(redis_client, prefix="bg:") + store = RedisCache(redis_client, prefix="bg:") - @BGCache.register_loader( - key="inventory", - interval_seconds=10, - run_immediately=True, - cache=cache, - ) + @bg(10, key="inventory", store=store, run_immediately=True) async def load_inventory(): calls["n"] += 1 return {"items": [f"item_{i}" for i in range(3)]} @@ -330,19 +328,19 @@ async def load_inventory(): assert calls["n"] == 1 async def test_bgcache_redis_with_error_handler(self, redis_client): - """Test BGCache error handling with Redis.""" + """Test bg error handling with Redis.""" errors = [] - cache = RedisCache(redis_client, prefix="bg:") + store = RedisCache(redis_client, prefix="bg:") def on_error(exc): errors.append(exc) - @BGCache.register_loader( + @bg( + 10, key="failing_loader", - interval_seconds=10, + store=store, run_immediately=True, on_error=on_error, - cache=cache, ) async def failing_loader(): raise 
ValueError("Simulated failure") @@ -390,9 +388,9 @@ def test_hybridcache_l1_miss_l2_hit(self, redis_client): @pytest.mark.asyncio async def test_hybridcache_with_ttlcache(self, redis_client): - """Test TTLCache using HybridCache backend.""" + """Test cache() using HybridCache backend.""" l2 = RedisCache(redis_client, prefix="hybrid_ttl:") - cache = HybridCache( + store = HybridCache( l1_cache=InMemCache(), l2_cache=l2, l1_ttl=60, @@ -400,7 +398,7 @@ async def test_hybridcache_with_ttlcache(self, redis_client): calls = {"n": 0} - @TTLCache.cached("user:{}", ttl=60, cache=cache) + @cache(60, key="user:{}", store=store) async def get_user(user_id: int): calls["n"] += 1 return {"id": user_id} @@ -509,18 +507,13 @@ def test_hybridcache_l2_ttl_shorter_than_requested(self, redis_client): @pytest.mark.asyncio async def test_hybridcache_with_bgcache_and_l2_ttl(self, redis_client): - """Test BGCache with HybridCache using l2_ttl.""" + """Test bg() with HybridCache using l2_ttl.""" l2 = RedisCache(redis_client, prefix="hybrid_bg:") - cache = HybridCache(l1_cache=InMemCache(), l2_cache=l2, l1_ttl=10, l2_ttl=60) + store = HybridCache(l1_cache=InMemCache(), l2_cache=l2, l1_ttl=10, l2_ttl=60) calls = {"n": 0} - @BGCache.register_loader( - key="config_with_l2", - interval_seconds=30, - run_immediately=True, - cache=cache, - ) + @bg(30, key="config_with_l2", store=store, run_immediately=True) async def load_config(): calls["n"] += 1 return {"setting": "value", "count": calls["n"]} @@ -687,17 +680,18 @@ class TestCacheRehydration: """Test that decorators can retrieve existing data from Redis without re-executing functions.""" async def test_ttlcache_rehydrates_from_redis(self, redis_client): - """Test TTLCache retrieves existing Redis data without executing function.""" - # Pre-populate Redis + """Test cache() retrieves existing Redis data without executing function.""" + # Pre-populate Redis using the library's own serialization format test_data = {"result": "from_redis"} - 
redis_client.setex("compute:42", 60, pickle.dumps(test_data)) + pre_cache = RedisCache(redis_client) + pre_cache.set("compute:42", test_data, ttl=60) call_count = 0 - @TTLCache.cached( - "compute:{}", - ttl=60, - cache=lambda: HybridCache( + @cache( + 60, + key="compute:{}", + store=lambda: HybridCache( l1_cache=InMemCache(), l2_cache=RedisCache(redis_client=redis_client), l1_ttl=60, @@ -719,7 +713,7 @@ async def compute(x): assert call_count == 0 async def test_swrcache_rehydrates_from_redis(self, redis_client): - """Test SWRCache retrieves existing Redis data without executing function.""" + """Test SWR cache retrieves existing Redis data without executing function.""" # Pre-populate Redis with CacheEntry now = time.time() entry = CacheEntry( @@ -730,11 +724,11 @@ async def test_swrcache_rehydrates_from_redis(self, redis_client): call_count = 0 - @SWRCache.cached( - "fetch:{}", - ttl=60, - stale_ttl=30, - cache=lambda: HybridCache( + @cache( + 60, + stale=30, + key="fetch:{}", + store=lambda: HybridCache( l1_cache=InMemCache(), l2_cache=RedisCache(redis_client=redis_client), l1_ttl=60, @@ -756,18 +750,19 @@ async def fetch(x): assert call_count == 0 async def test_bgcache_rehydrates_from_redis(self, redis_client): - """Test BGCache retrieves existing Redis data without executing function on init.""" - # Pre-populate Redis + """Test bg() retrieves existing Redis data without executing function on init.""" + # Pre-populate Redis using the library's own serialization format test_data = {"users": ["Alice", "Bob", "Charlie"]} - redis_client.setex("users_list_rehydrate", 60, pickle.dumps(test_data)) + pre_cache = RedisCache(redis_client) + pre_cache.set("users_list_rehydrate", test_data, ttl=60) call_count = 0 - @BGCache.register_loader( + @bg( + 60, key="users_list_rehydrate", - interval_seconds=60, run_immediately=True, - cache=lambda: HybridCache( + store=lambda: HybridCache( l1_cache=InMemCache(), l2_cache=RedisCache(redis_client=redis_client), l1_ttl=60, @@ 
-786,18 +781,18 @@ async def load_users(): assert result == test_data assert call_count == 0 - BGCache.shutdown(wait=False) + bg.shutdown(wait=False) async def test_ttlcache_executes_on_cache_miss(self, redis_client): - """Test TTLCache executes function when Redis is empty.""" + """Test cache() executes function when Redis is empty.""" redis_client.flushdb() call_count = 0 - @TTLCache.cached( - "compute:{}", - ttl=60, - cache=lambda: HybridCache( + @cache( + 60, + key="compute:{}", + store=lambda: HybridCache( l1_cache=InMemCache(), l2_cache=RedisCache(redis_client=redis_client), l1_ttl=60, @@ -819,16 +814,16 @@ async def compute(x): assert call_count == 1 async def test_swrcache_executes_on_cache_miss(self, redis_client): - """Test SWRCache executes function when Redis is empty.""" + """Test SWR cache executes function when Redis is empty.""" redis_client.flushdb() call_count = 0 - @SWRCache.cached( - "fetch:{}", - ttl=60, - stale_ttl=30, - cache=lambda: HybridCache( + @cache( + 60, + stale=30, + key="fetch:{}", + store=lambda: HybridCache( l1_cache=InMemCache(), l2_cache=RedisCache(redis_client=redis_client), l1_ttl=60, @@ -850,16 +845,16 @@ async def fetch(x): assert call_count == 1 async def test_bgcache_executes_on_cache_miss(self, redis_client): - """Test BGCache executes function on init when Redis is empty.""" + """Test bg() executes function on init when Redis is empty.""" redis_client.flushdb() call_count = 0 - @BGCache.register_loader( + @bg( + 60, key="empty_test_bgcache", - interval_seconds=60, run_immediately=True, - cache=lambda: HybridCache( + store=lambda: HybridCache( l1_cache=InMemCache(), l2_cache=RedisCache(redis_client=redis_client), l1_ttl=60, @@ -879,20 +874,21 @@ async def load_data(): assert result == {"data": "fresh_load"} assert call_count == 1 - BGCache.shutdown(wait=False) + bg.shutdown(wait=False) async def test_ttlcache_different_args_separate_entries(self, redis_client): - """Test TTLCache creates separate cache entries for 
different arguments.""" + """Test cache() creates separate cache entries for different arguments.""" # Pre-populate Redis with data for arg=10 test_data = {"result": "from_redis_10"} - redis_client.setex("compute:10", 60, pickle.dumps(test_data)) + pre_cache = RedisCache(redis_client) + pre_cache.set("compute:10", test_data, ttl=60) call_count = 0 - @TTLCache.cached( - "compute:{}", - ttl=60, - cache=lambda: HybridCache( + @cache( + 60, + key="compute:{}", + store=lambda: HybridCache( l1_cache=InMemCache(), l2_cache=RedisCache(redis_client=redis_client), l1_ttl=60, @@ -941,10 +937,10 @@ def test_redis_cache_hit_performance(self, redis_client): @pytest.mark.asyncio async def test_ttlcache_with_redis_performance(self, redis_client): - """Test TTLCache performance with Redis backend.""" - cache = RedisCache(redis_client, prefix="perf_ttl:") + """Test cache() performance with Redis backend.""" + store = RedisCache(redis_client, prefix="perf_ttl:") - @TTLCache.cached("item:{}", ttl=60, cache=cache) + @cache(60, key="item:{}", store=store) async def get_item(item_id: int): return {"id": item_id} @@ -957,7 +953,7 @@ async def get_item(item_id: int): avg_time_ms = (duration / 1000) * 1000 - assert avg_time_ms < 25, f"TTLCache hit too slow: {avg_time_ms:.3f}ms" + assert avg_time_ms < 25, f"cache hit too slow: {avg_time_ms:.3f}ms" if __name__ == "__main__": diff --git a/tests/test_key_generation.py b/tests/test_key_generation.py index 3c65c7e..b248f01 100644 --- a/tests/test_key_generation.py +++ b/tests/test_key_generation.py @@ -1,10 +1,17 @@ +from __future__ import annotations + +import asyncio +import hashlib + import pytest -from advanced_caching.decorators import _create_smart_key_fn + +from advanced_caching import cache +from advanced_caching._cache import _make_key_fn class TestSmartKeyGeneration: """ - Unit tests for _create_smart_key_fn to ensure robust cache key generation. + Unit tests for _make_key_fn to ensure robust cache key generation. 
""" def test_static_key(self): @@ -13,7 +20,7 @@ def test_static_key(self): def func(a, b): pass - key_fn = _create_smart_key_fn("static-key", func) + key_fn = _make_key_fn("static-key", func) assert key_fn(1, 2) == "static-key" assert key_fn(a=1, b=2) == "static-key" @@ -26,7 +33,7 @@ def func(a): def my_key_gen(a): return f"custom:{a}" - key_fn = _create_smart_key_fn(my_key_gen, func) + key_fn = _make_key_fn(my_key_gen, func) assert key_fn(1) == "custom:1" def test_simple_positional_optimization(self): @@ -35,7 +42,7 @@ def test_simple_positional_optimization(self): def func(user_id): pass - key_fn = _create_smart_key_fn("user:{}", func) + key_fn = _make_key_fn("user:{}", func) # Positional arg assert key_fn(123) == "user:123" @@ -52,7 +59,7 @@ def test_named_placeholder_kwargs(self): def func(user_id): pass - key_fn = _create_smart_key_fn("user:{user_id}", func) + key_fn = _make_key_fn("user:{user_id}", func) assert key_fn(user_id=123) == "user:123" def test_named_placeholder_positional(self): @@ -61,7 +68,7 @@ def test_named_placeholder_positional(self): def func(user_id, other): pass - key_fn = _create_smart_key_fn("user:{user_id}", func) + key_fn = _make_key_fn("user:{user_id}", func) assert key_fn(123, "ignore") == "user:123" def test_named_placeholder_defaults(self): @@ -70,7 +77,7 @@ def test_named_placeholder_defaults(self): def func(user_id=999): pass - key_fn = _create_smart_key_fn("user:{user_id}", func) + key_fn = _make_key_fn("user:{user_id}", func) # Use default assert key_fn() == "user:999" @@ -84,7 +91,7 @@ def test_mixed_args_and_kwargs(self): def func(a, b, c): pass - key_fn = _create_smart_key_fn("{a}:{b}:{c}", func) + key_fn = _make_key_fn("{a}:{b}:{c}", func) # a=1 (pos), b=2 (pos), c=3 (kw) assert key_fn(1, 2, c=3) == "1:2:3" @@ -97,7 +104,7 @@ def test_fallback_to_raw_positional(self): def func(a, b): pass - key_fn = _create_smart_key_fn("{}:{}", func) + key_fn = _make_key_fn("{}:{}", func) assert key_fn(1, 2) == "1:2" def 
test_missing_argument_returns_template(self): @@ -106,7 +113,7 @@ def test_missing_argument_returns_template(self): def func(a, b): pass - key_fn = _create_smart_key_fn("key:{a}", func) + key_fn = _make_key_fn("key:{a}", func) # 'a' is missing from args/kwargs and has no default # format() raises KeyError, fallback format(*args) raises IndexError/ValueError @@ -119,7 +126,7 @@ def test_extra_kwargs_in_template(self): def func(a, **kwargs): pass - key_fn = _create_smart_key_fn("{a}:{extra}", func) + key_fn = _make_key_fn("{a}:{extra}", func) assert key_fn(1, extra="value") == "1:value" @@ -129,7 +136,7 @@ def test_complex_positional_no_optimization(self): def func(a, b): pass - key_fn = _create_smart_key_fn("prefix:{}-suffix:{}", func) + key_fn = _make_key_fn("prefix:{}-suffix:{}", func) assert key_fn(1, 2) == "prefix:1-suffix:2" def test_format_specifiers(self): @@ -138,7 +145,7 @@ def test_format_specifiers(self): def func(price): pass - key_fn = _create_smart_key_fn("price:{price:.2f}", func) + key_fn = _make_key_fn("price:{price:.2f}", func) assert key_fn(12.3456) == "price:12.35" def test_object_str_representation(self): @@ -154,5 +161,191 @@ def __str__(self): def func(user): pass - key_fn = _create_smart_key_fn("obj:{user}", func) + key_fn = _make_key_fn("obj:{user}", func) assert key_fn(User(42)) == "obj:User(42)" + + +class TestCallableKeys: + """ + Tests for callable cache keys — lambdas, named functions, hashing, and + end-to-end integration with the @cache decorator. 
+ """ + + # ── _make_key_fn unit tests ───────────────────────────────────────────── + + def test_callable_passthrough(self): + """Callable is returned as-is and called with the same args as the fn.""" + + def func(user_id: int): + pass + + key_fn = _make_key_fn(lambda user_id: f"u:{user_id}", func) + assert key_fn(42) == "u:42" + assert key_fn(0) == "u:0" + + def test_callable_uses_keyword_arg(self): + """Callable receives kwargs when the decorator is called with keyword args.""" + + def func(user_id: int): + pass + + key_fn = _make_key_fn(lambda user_id: f"u:{user_id}", func) + assert key_fn(user_id=7) == "u:7" + + def test_callable_multi_arg(self): + """Callable can combine multiple positional arguments.""" + + def func(tenant: str, user_id: int): + pass + + key_fn = _make_key_fn(lambda tenant, user_id: f"{tenant}:user:{user_id}", func) + assert key_fn("acme", 99) == "acme:user:99" + assert key_fn("beta", 1) == "beta:user:1" + + def test_callable_with_varargs(self): + """Callable using *args / **kwargs receives them directly.""" + + def func(*args, **kwargs): + pass + + key_fn = _make_key_fn( + lambda *a, **k: f"lang:{k.get('lang', a[0] if a else 'en')}", func + ) + assert key_fn("fr") == "lang:fr" + assert key_fn(lang="de") == "lang:de" + assert key_fn() == "lang:en" # default + + def test_callable_conditional_key(self): + """Callable can branch on argument value.""" + + def func(resource_id: int, admin: bool = False): + pass + + def make_key(resource_id: int, admin: bool = False) -> str: + prefix = "admin" if admin else "public" + return f"{prefix}:resource:{resource_id}" + + key_fn = _make_key_fn(make_key, func) + assert key_fn(5, False) == "public:resource:5" + assert key_fn(5, True) == "admin:resource:5" + assert key_fn(5, admin=True) == "admin:resource:5" + + def test_callable_hashing_input(self): + """Callable can hash complex input to produce a short, safe key.""" + + def func(query: str): + pass + + def hashed_key(query: str) -> str: + digest = 
hashlib.sha256(query.encode()).hexdigest()[:12] + return f"search:{digest}" + + key_fn = _make_key_fn(hashed_key, func) + k1 = key_fn("SELECT * FROM users") + k2 = key_fn("SELECT * FROM products") + assert k1.startswith("search:") + assert k2.startswith("search:") + assert k1 != k2 + assert key_fn("SELECT * FROM users") == k1 # deterministic + + def test_callable_tuple_or_list_input(self): + """Callable can serialise a list/tuple argument into a stable key.""" + + def func(ids: list): + pass + + key_fn = _make_key_fn( + lambda ids: f"batch:{','.join(str(i) for i in sorted(ids))}", func + ) + assert key_fn([3, 1, 2]) == "key" or key_fn([3, 1, 2]) == "batch:1,2,3" + # explicit check + assert key_fn([3, 1, 2]) == "batch:1,2,3" + + def test_callable_named_function(self): + """Named function (not lambda) works identically.""" + + def func(org: str, repo: str): + pass + + def build_key(org: str, repo: str) -> str: + return f"gh:{org}/{repo}" + + key_fn = _make_key_fn(build_key, func) + assert key_fn("acme", "api") == "gh:acme/api" + + # ── End-to-end with @cache decorator ─────────────────────────────────── + + def test_callable_key_end_to_end_sync(self): + """@cache with callable key actually caches distinct keys per argument.""" + calls: list[int] = [] + + @cache(60, key=lambda user_id: f"user:{user_id}") + def get_user(user_id: int) -> dict: + calls.append(user_id) + return {"id": user_id} + + get_user(1) + get_user(1) # hit + get_user(2) # different key + assert calls == [1, 2], "should call underlying fn once per unique id" + + def test_callable_key_end_to_end_async(self): + """@cache async with callable key caches correctly.""" + calls: list[int] = [] + + @cache(60, key=lambda user_id: f"async_user:{user_id}") + async def fetch_user(user_id: int) -> dict: + calls.append(user_id) + return {"id": user_id} + + async def run(): + await fetch_user(10) + await fetch_user(10) # hit + await fetch_user(20) # different key + + asyncio.run(run()) + assert calls == [10, 20] + 
+ def test_callable_key_invalidation(self): + """invalidate() respects callable-key-generated keys.""" + calls: list[int] = [] + + @cache(60, key=lambda uid: f"inv_user:{uid}") + def get_user(uid: int) -> dict: + calls.append(uid) + return {"uid": uid} + + get_user(5) + get_user(5) # hit — calls still [5] + get_user.invalidate(5) + get_user(5) # miss — re-fetches + assert calls == [5, 5] + + def test_callable_key_conditional_namespacing(self): + """Callable key can namespace by environment/context.""" + env = {"name": "staging"} + + @cache(60, key=lambda resource_id: f"{env['name']}:res:{resource_id}") + def get_resource(resource_id: int) -> str: + return f"data:{resource_id}" + + r1 = get_resource(1) + env["name"] = "prod" + r2 = get_resource(1) # different key → miss + assert r1 == "data:1" + assert r2 == "data:1" + + def test_callable_key_multi_arg_end_to_end(self): + """Callable key combining multiple args produces correct cache segmentation.""" + calls: list[tuple] = [] + + @cache(60, key=lambda tenant, endpoint: f"{tenant}:{endpoint}") + def api_call(tenant: str, endpoint: str) -> str: + calls.append((tenant, endpoint)) + return f"{tenant}-{endpoint}" + + api_call("a", "/users") + api_call("a", "/users") # hit + api_call("b", "/users") # different tenant → miss + api_call("a", "/orders") # different endpoint → miss + assert calls == [("a", "/users"), ("b", "/users"), ("a", "/orders")] diff --git a/tests/test_local_file_cache.py b/tests/test_local_file_cache.py index 2e563e1..81ef088 100644 --- a/tests/test_local_file_cache.py +++ b/tests/test_local_file_cache.py @@ -2,42 +2,39 @@ import time import tempfile -from advanced_caching import LocalFileCache +from advanced_caching import LocalFileCache, cache, ChainCache, InMemCache def test_local_file_cache_set_get_and_expiry(): with tempfile.TemporaryDirectory() as tmpdir: - cache = LocalFileCache(tmpdir) - cache.set("foo", "bar", ttl=0.1) - assert cache.get("foo") == "bar" + c = LocalFileCache(tmpdir) + 
c.set("foo", "bar", ttl=0.1) + assert c.get("foo") == "bar" time.sleep(0.2) - assert cache.get("foo") is None + assert c.get("foo") is None def test_local_file_cache_dedupe_writes(): with tempfile.TemporaryDirectory() as tmpdir: - cache = LocalFileCache(tmpdir, dedupe_writes=True) - cache.set("foo", {"a": 1}, ttl=0) + c = LocalFileCache(tmpdir, dedupe_writes=True) + c.set("foo", {"a": 1}, ttl=0) mtime1 = os.path.getmtime(os.path.join(tmpdir, "foo")) time.sleep(0.05) - cache.set("foo", {"a": 1}, ttl=0) + c.set("foo", {"a": 1}, ttl=0) mtime2 = os.path.getmtime(os.path.join(tmpdir, "foo")) - # Allow filesystem timestamp granularity drift; ensure dedupe prevented meaningful rewrite - assert cache.get("foo") == {"a": 1} + assert c.get("foo") == {"a": 1} assert mtime2 <= mtime1 + 0.1 - cache.set("foo", {"a": 2}, ttl=0) + c.set("foo", {"a": 2}, ttl=0) mtime3 = os.path.getmtime(os.path.join(tmpdir, "foo")) assert mtime3 > mtime2 def test_ttlcache_with_local_file_cache_decorator(): - from advanced_caching import TTLCache - calls = {"n": 0} with tempfile.TemporaryDirectory() as tmpdir: - cache = LocalFileCache(tmpdir) + file_store = LocalFileCache(tmpdir) - @TTLCache.cached("demo", ttl=0.2, cache=cache) + @cache(0.2, key="demo", store=file_store) def compute(): calls["n"] += 1 return calls["n"] @@ -51,20 +48,17 @@ def compute(): def test_chaincache_with_local_file_and_ttlcache(): - from advanced_caching import ChainCache, InMemCache, TTLCache - calls = {"n": 0} with tempfile.TemporaryDirectory() as tmpdir: l1 = InMemCache() l2 = LocalFileCache(tmpdir) chain = ChainCache([(l1, 0), (l2, None)]) - @TTLCache.cached("chain:{user_id}", ttl=0.2, cache=chain) + @cache(0.2, key="chain:{user_id}", store=chain) def fetch_user(user_id: int): calls["n"] += 1 return {"id": user_id, "v": calls["n"]} - # First call populates chain (both L1 and file) u1 = fetch_user(1) assert u1 == {"id": 1, "v": 1} @@ -72,13 +66,14 @@ def fetch_user(user_id: int): u2 = fetch_user(1) assert u2 == u1 - # 
Clear L1 by recreating chain with fresh InMem but same file backend + # Clear L1, rebuild chain — file backend still has the value l1b = InMemCache() chain2 = ChainCache([(l1b, 0), (l2, None)]) - @TTLCache.cached("chain:{user_id}", ttl=0.2, cache=chain2) + @cache(0.2, key="chain:{user_id}", store=chain2) def fetch_user_again(user_id: int): - return fetch_user(user_id) # will hit file backend via chain2 + calls["n"] += 1 + return {"id": user_id, "v": calls["n"]} u3 = fetch_user_again(1) - assert u3 == u1 # pulled from LocalFileCache via chain + assert u3 == u1 # pulled from LocalFileCache via chain2 diff --git a/tests/test_metrics.py b/tests/test_metrics.py index 340b31d..89a88f1 100644 --- a/tests/test_metrics.py +++ b/tests/test_metrics.py @@ -13,7 +13,7 @@ import pytest -from advanced_caching import TTLCache, SWRCache, BGCache +from advanced_caching import cache, bg from advanced_caching.metrics import MetricsCollector, NullMetrics, NULL_METRICS from advanced_caching.storage import InMemCache, InstrumentedStorage @@ -209,7 +209,7 @@ def test_ttlcache_with_metrics(): metrics = MockMetrics() call_count = 0 - @TTLCache.cached("user:{}", ttl=60, metrics=metrics) + @cache(60, key="user:{}", metrics=metrics) def get_user(user_id: int) -> dict: nonlocal call_count call_count += 1 @@ -223,7 +223,7 @@ def get_user(user_id: int) -> dict: # Check metrics assert len(metrics.misses) == 1 # get_entry miss assert metrics.misses[0][0] == "get_user" - assert metrics.misses[0][2]["decorator"] == "TTLCache" + assert metrics.misses[0][2]["decorator"] == "cache" assert len(metrics.sets) == 1 # set after miss assert metrics.sets[0][0] == "get_user" @@ -236,7 +236,7 @@ def get_user(user_id: int) -> dict: # Check hit was recorded assert len(metrics.hits) == 1 assert metrics.hits[0][0] == "get_user" - assert metrics.hits[0][2]["decorator"] == "TTLCache" + assert metrics.hits[0][2]["decorator"] == "cache" @pytest.mark.asyncio @@ -245,7 +245,7 @@ async def 
test_ttlcache_async_with_metrics(): metrics = MockMetrics() call_count = 0 - @TTLCache.cached("user:{}", ttl=60, metrics=metrics) + @cache(60, key="user:{}", metrics=metrics) async def get_user_async(user_id: int) -> dict: nonlocal call_count call_count += 1 @@ -275,7 +275,7 @@ def test_swrcache_with_metrics(): metrics = MockMetrics() call_count = 0 - @SWRCache.cached("data:{}", ttl=1, stale_ttl=5, metrics=metrics) + @cache(1, stale=5, key="data:{}", metrics=metrics) def fetch_data(key: str) -> str: nonlocal call_count call_count += 1 @@ -312,13 +312,7 @@ async def test_bgcache_with_metrics(): metrics = MockMetrics() call_count = 0 - @BGCache.register_loader( - "config_data", - interval_seconds=1, - ttl=10, - run_immediately=True, - metrics=metrics, - ) + @bg(1, key="config_data", ttl=10, run_immediately=True, metrics=metrics) async def load_config() -> dict: nonlocal call_count call_count += 1 @@ -445,21 +439,21 @@ def test_metrics_latency_overhead(): import timeit # Without metrics - @TTLCache.cached("key:{}", ttl=60) + @cache(60, key="key:{}") def func_no_metrics(key: int) -> int: return key * 2 # With metrics (using NullMetrics for true zero overhead test) from advanced_caching.metrics import NULL_METRICS - @TTLCache.cached("key:{}", ttl=60, metrics=NULL_METRICS) + @cache(60, key="key:{}", metrics=NULL_METRICS) def func_with_null_metrics(key: int) -> int: return key * 2 # With MockMetrics (realistic overhead test) metrics = MockMetrics() - @TTLCache.cached("key:{}", ttl=60, metrics=metrics) + @cache(60, key="key:{}", metrics=metrics) def func_with_mock_metrics(key: int) -> int: return key * 2 @@ -525,7 +519,7 @@ def test_inmemory_metrics_collector(): metrics = InMemoryMetrics() # Test basic cache operations with TTLCache - @TTLCache.cached("user:{id}", ttl=60, metrics=metrics) + @cache(60, key="user:{id}", metrics=metrics) def get_user(id: int): return {"id": id, "name": f"User_{id}"} @@ -584,15 +578,15 @@ def test_shared_metrics_collector(): metrics = 
InMemoryMetrics() # Multiple functions using the same collector - @TTLCache.cached("user:{id}", ttl=60, metrics=metrics) + @cache(60, key="user:{id}", metrics=metrics) def get_user(id: int): return {"id": id, "name": f"User_{id}"} - @TTLCache.cached("product:{id}", ttl=300, metrics=metrics) + @cache(300, key="product:{id}", metrics=metrics) def get_product(id: int): return {"id": id, "price": 99.99} - @SWRCache.cached("config:{key}", ttl=120, stale_ttl=600, metrics=metrics) + @cache(120, stale=600, key="config:{key}", metrics=metrics) def get_config(key: str): return {"key": key, "value": "enabled"} @@ -661,12 +655,12 @@ async def test_shared_metrics_async(): metrics = InMemoryMetrics() - @TTLCache.cached("async_user:{id}", ttl=60, metrics=metrics) + @cache(60, key="async_user:{id}", metrics=metrics) async def get_user_async(id: int): await asyncio.sleep(0.001) return {"id": id, "name": f"User_{id}"} - @TTLCache.cached("async_product:{id}", ttl=60, metrics=metrics) + @cache(60, key="async_product:{id}", metrics=metrics) async def get_product_async(id: int): await asyncio.sleep(0.001) return {"id": id, "price": 99.99} @@ -697,7 +691,7 @@ def test_inmemory_metrics_thread_safety(): metrics = InMemoryMetrics() - @TTLCache.cached("item:{id}", ttl=60, metrics=metrics) + @cache(60, key="item:{id}", metrics=metrics) def get_item(id: int): time.sleep(0.001) # Simulate work return {"id": id} @@ -739,13 +733,8 @@ def test_bgcache_with_inmemory_metrics(): metrics = InMemoryMetrics() call_count = 0 - # Register BGCache with metrics using decorator - @BGCache.register_loader( - "test_data", - interval_seconds=1, # Refresh every 1 second - run_immediately=True, - metrics=metrics, - ) + # Register bg with metrics using decorator + @bg(1, key="test_data", run_immediately=True, metrics=metrics) def data_loader(): nonlocal call_count call_count += 1 @@ -815,13 +804,8 @@ async def test_bgcache_async_with_inmemory_metrics(): metrics = InMemoryMetrics() call_count = 0 - # Register async 
BGCache with metrics using decorator - @BGCache.register_loader( - "async_test_data", - interval_seconds=1, - run_immediately=True, - metrics=metrics, - ) + # Register async bg with metrics using decorator + @bg(1, key="async_test_data", run_immediately=True, metrics=metrics) async def async_data_loader(): nonlocal call_count call_count += 1 @@ -878,25 +862,20 @@ def test_shared_metrics_all_decorators(): metrics = InMemoryMetrics() - # TTLCache function - @TTLCache.cached("user:{id}", ttl=60, metrics=metrics) + # TTL cache function + @cache(60, key="user:{id}", metrics=metrics) def get_user(id: int): return {"id": id, "type": "user"} - # SWRCache function - @SWRCache.cached("product:{id}", ttl=10, stale_ttl=60, metrics=metrics) + # SWR cache function + @cache(10, stale=60, key="product:{id}", metrics=metrics) def get_product(id: int): return {"id": id, "type": "product"} - # BGCache function + # Background cache function bg_call_count = 0 - @BGCache.register_loader( - "shared_bg_data", - interval_seconds=1, - run_immediately=True, - metrics=metrics, - ) + @bg(1, key="shared_bg_data", run_immediately=True, metrics=metrics) def bg_loader(): nonlocal bg_call_count bg_call_count += 1 @@ -904,13 +883,13 @@ def bg_loader(): try: # Generate traffic for all three types - get_user(1) # TTLCache miss - get_user(1) # TTLCache hit + get_user(1) # cache miss + get_user(1) # cache hit - get_product(100) # SWRCache miss - get_product(100) # SWRCache hit + get_product(100) # SWR miss + get_product(100) # SWR hit - # Wait for BGCache initial load + # Wait for bg initial load time.sleep(0.1) bg_data = bg_loader() # BGCache call assert bg_data is not None @@ -947,9 +926,9 @@ def bg_loader(): assert total_bg_refreshes >= 1 print(f"\n✓ All three decorator types tracked in single collector:") - print(f" - get_user (TTLCache): {stats['caches']['get_user']}") - print(f" - get_product (SWRCache): {stats['caches']['get_product']}") - print(f" - shared_bg_data (BGCache): 
{stats['caches']['shared_bg_data']}") + print(f" - get_user (cache): {stats['caches']['get_user']}") + print(f" - get_product (SWR cache): {stats['caches']['get_product']}") + print(f" - shared_bg_data (bg cache): {stats['caches']['shared_bg_data']}") print(f" - Background refreshes: {total_bg_refreshes}") finally: diff --git a/tests/test_s3_cache_integration.py b/tests/test_s3_cache_integration.py index 2318b2d..6719a82 100644 --- a/tests/test_s3_cache_integration.py +++ b/tests/test_s3_cache_integration.py @@ -12,7 +12,7 @@ except ImportError: # pragma: no cover mock_aws = None -from advanced_caching import S3Cache, TTLCache, SWRCache, ChainCache, InMemCache +from advanced_caching import cache, S3Cache, ChainCache, InMemCache S3_ENDPOINT = os.getenv("S3_ENDPOINT_URL") USE_REAL_S3 = bool(S3_ENDPOINT) @@ -47,7 +47,7 @@ def test_s3cache_set_get_and_dedupe(dedupe): client.create_bucket(Bucket="test-bkt") except Exception: pass - cache = S3Cache( + store = S3Cache( bucket="test-bkt", prefix="t/", s3_client=client, @@ -55,14 +55,14 @@ def test_s3cache_set_get_and_dedupe(dedupe): dedupe_writes=dedupe, ) - cache.set("k1", {"v": 1}, ttl=0) - assert cache.get("k1") == {"v": 1} + store.set("k1", {"v": 1}, ttl=0) + assert store.get("k1") == {"v": 1} - cache.set("k1", {"v": 1}, ttl=0) - assert cache.get("k1") == {"v": 1} + store.set("k1", {"v": 1}, ttl=0) + assert store.get("k1") == {"v": 1} - cache.set("k1", {"v": 2}, ttl=0) - assert cache.get("k1") == {"v": 2} + store.set("k1", {"v": 2}, ttl=0) + assert store.get("k1") == {"v": 2} @_maybe_mock @@ -72,7 +72,7 @@ def test_ttlcache_with_s3cache_decorator(): client.create_bucket(Bucket="test-bkt") except Exception: pass - cache = S3Cache( + store = S3Cache( bucket="test-bkt", prefix="u/", s3_client=client, @@ -83,7 +83,7 @@ def test_ttlcache_with_s3cache_decorator(): calls = {"n": 0} - @TTLCache.cached("user:{user_id}", ttl=0.2, cache=cache) + @cache(0.2, key="user:{user_id}", store=store) def fetch_user(user_id: int): 
calls["n"] += 1 return {"id": user_id, "n": calls["n"]} @@ -94,7 +94,7 @@ def fetch_user(user_id: int): time.sleep(1.0) # Force delete to ensure cache miss if TTL/time drift is an issue - cache.delete("user:1") + store.delete("user:1") third = fetch_user(1) # TTL expired, should recompute assert third["n"] >= 2 @@ -107,13 +107,13 @@ def test_swrcache_with_s3cache_decorator(): client.create_bucket(Bucket="test-bkt-swr") except Exception: pass - cache = S3Cache( + store = S3Cache( bucket="test-bkt-swr", prefix="swr/", s3_client=client, serializer="json" ) calls = {"n": 0} - @SWRCache.cached("data:{id}", ttl=0.5, stale_ttl=1.0, cache=cache) + @cache(0.5, stale=1.0, key="data:{id}", store=store) def fetch_data(id: int): calls["n"] += 1 return {"id": id, "n": calls["n"]} @@ -126,7 +126,7 @@ def fetch_data(id: int): v2 = fetch_data(1) assert v2["n"] == 1 - # 3. Wait for TTL to expire but within stale_ttl + # 3. Wait for TTL to expire but within stale window time.sleep(0.6) # Should return stale value immediately, trigger background refresh v3 = fetch_data(1) diff --git a/tests/test_sync_support.py b/tests/test_sync_support.py index f8c6a03..617c503 100644 --- a/tests/test_sync_support.py +++ b/tests/test_sync_support.py @@ -1,10 +1,10 @@ import asyncio import pytest -from advanced_caching import TTLCache, SWRCache, BGCache +from advanced_caching import cache, bg def test_ttl_sync_remains_sync(): - @TTLCache.cached("ttl_sync", ttl=60) + @cache(60, key="ttl_sync") def sync_fn(x): return x + 1 @@ -14,7 +14,7 @@ def sync_fn(x): @pytest.mark.asyncio async def test_ttl_async_remains_async(): - @TTLCache.cached("ttl_async", ttl=60) + @cache(60, key="ttl_async") async def async_fn(x): return x + 1 @@ -23,7 +23,7 @@ async def async_fn(x): def test_swr_sync_remains_sync(): - @SWRCache.cached("swr_sync", ttl=60) + @cache(60, key="swr_sync") def sync_fn(x): return x + 1 @@ -33,7 +33,7 @@ def sync_fn(x): @pytest.mark.asyncio async def test_swr_async_remains_async(): - 
@SWRCache.cached("swr_async", ttl=60) + @cache(60, key="swr_async") async def async_fn(x): return x + 1 @@ -42,21 +42,21 @@ async def async_fn(x): def test_bg_sync_remains_sync(): - @BGCache.register_loader("bg_sync", interval_seconds=60) + @bg(60, key="bg_sync") def sync_loader(): return 42 assert not asyncio.iscoroutinefunction(sync_loader) assert sync_loader() == 42 - BGCache.shutdown() + bg.shutdown() @pytest.mark.asyncio async def test_bg_async_remains_async(): - @BGCache.register_loader("bg_async", interval_seconds=60) + @bg(60, key="bg_async") async def async_loader(): return 42 assert asyncio.iscoroutinefunction(async_loader) assert await async_loader() == 42 - BGCache.shutdown() + bg.shutdown() diff --git a/uv.lock b/uv.lock index 249e69c..5dcf490 100644 --- a/uv.lock +++ b/uv.lock @@ -10,7 +10,7 @@ resolution-markers = [ [[package]] name = "advanced-caching" -version = "0.2.2b0" +version = "1.0.0" source = { editable = "." } dependencies = [ { name = "apscheduler" }, @@ -61,33 +61,33 @@ dev = [ [package.metadata] requires-dist = [ - { name = "apscheduler", specifier = ">=3.10" }, - { name = "google-cloud-monitoring", marker = "extra == 'all-metrics'", specifier = ">=2.15.0" }, - { name = "google-cloud-monitoring", marker = "extra == 'gcp-monitoring'", specifier = ">=2.15.0" }, - { name = "google-cloud-storage", marker = "extra == 'tests-gcs'", specifier = ">=2.10.0" }, - { name = "moto", extras = ["boto3"], marker = "extra == 'tests-s3'", specifier = ">=5.0.0" }, - { name = "opentelemetry-api", marker = "extra == 'all-metrics'", specifier = ">=1.20.0" }, - { name = "opentelemetry-api", marker = "extra == 'opentelemetry'", specifier = ">=1.20.0" }, - { name = "opentelemetry-sdk", marker = "extra == 'all-metrics'", specifier = ">=1.20.0" }, - { name = "opentelemetry-sdk", marker = "extra == 'opentelemetry'", specifier = ">=1.20.0" }, - { name = "orjson", specifier = ">=3.11.5" }, - { name = "pytest", marker = "extra == 'dev'", specifier = ">=8.2" }, - { 
name = "pytest", marker = "extra == 'tests'" }, - { name = "pytest-asyncio", marker = "extra == 'tests'" }, - { name = "pytest-cov", marker = "extra == 'dev'", specifier = ">=4.0" }, - { name = "pytest-cov", marker = "extra == 'tests'" }, - { name = "redis", marker = "extra == 'redis'", specifier = ">=5.0.0" }, + { name = "apscheduler", specifier = ">=3.11.2" }, + { name = "google-cloud-monitoring", marker = "extra == 'all-metrics'", specifier = ">=2.29.1" }, + { name = "google-cloud-monitoring", marker = "extra == 'gcp-monitoring'", specifier = ">=2.29.1" }, + { name = "google-cloud-storage", marker = "extra == 'tests-gcs'", specifier = ">=3.9.0" }, + { name = "moto", marker = "extra == 'tests-s3'", specifier = ">=5.1.22" }, + { name = "opentelemetry-api", marker = "extra == 'all-metrics'", specifier = ">=1.40.0" }, + { name = "opentelemetry-api", marker = "extra == 'opentelemetry'", specifier = ">=1.40.0" }, + { name = "opentelemetry-sdk", marker = "extra == 'all-metrics'", specifier = ">=1.40.0" }, + { name = "opentelemetry-sdk", marker = "extra == 'opentelemetry'", specifier = ">=1.40.0" }, + { name = "orjson", specifier = ">=3.11.7" }, + { name = "pytest", marker = "extra == 'dev'", specifier = ">=9.0.2" }, + { name = "pytest", marker = "extra == 'tests'", specifier = ">=9.0.2" }, + { name = "pytest-asyncio", marker = "extra == 'tests'", specifier = ">=1.3.0" }, + { name = "pytest-cov", marker = "extra == 'dev'", specifier = ">=7.0.0" }, + { name = "pytest-cov", marker = "extra == 'tests'", specifier = ">=7.0.0" }, + { name = "redis", marker = "extra == 'redis'", specifier = ">=7.3.0" }, ] provides-extras = ["redis", "dev", "tests", "tests-s3", "tests-gcs", "metrics", "opentelemetry", "gcp-monitoring", "all-metrics"] [package.metadata.requires-dev] dev = [ - { name = "pytest", specifier = ">=8.2" }, + { name = "pytest", specifier = ">=9.0.2" }, { name = "pytest-asyncio", specifier = ">=1.3.0" }, - { name = "pytest-cov", specifier = ">=4.0" }, - { name = 
"ruff", specifier = ">=0.14.8" }, - { name = "scalene", specifier = ">=1.5.55" }, - { name = "testcontainers", extras = ["redis"], specifier = ">=4.0.0" }, + { name = "pytest-cov", specifier = ">=7.0.0" }, + { name = "ruff", specifier = ">=0.15.5" }, + { name = "scalene", specifier = ">=2.1.4" }, + { name = "testcontainers", extras = ["redis"], specifier = ">=4.14.1" }, ] [[package]] @@ -101,14 +101,14 @@ wheels = [ [[package]] name = "apscheduler" -version = "3.11.1" +version = "3.11.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "tzlocal" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/d0/81/192db4f8471de5bc1f0d098783decffb1e6e69c4f8b4bc6711094691950b/apscheduler-3.11.1.tar.gz", hash = "sha256:0db77af6400c84d1747fe98a04b8b58f0080c77d11d338c4f507a9752880f221", size = 108044 } +sdist = { url = "https://files.pythonhosted.org/packages/07/12/3e4389e5920b4c1763390c6d371162f3784f86f85cd6d6c1bfe68eef14e2/apscheduler-3.11.2.tar.gz", hash = "sha256:2a9966b052ec805f020c8c4c3ae6e6a06e24b1bf19f2e11d91d8cca0473eef41", size = 108683 } wheels = [ - { url = "https://files.pythonhosted.org/packages/58/9f/d3c76f76c73fcc959d28e9def45b8b1cc3d7722660c5003b19c1022fd7f4/apscheduler-3.11.1-py3-none-any.whl", hash = "sha256:6162cb5683cb09923654fa9bdd3130c4be4bfda6ad8990971c9597ecd52965d2", size = 64278 }, + { url = "https://files.pythonhosted.org/packages/9f/64/2e54428beba8d9992aa478bb8f6de9e4ecaa5f8f513bcfd567ed7fb0262d/apscheduler-3.11.2-py3-none-any.whl", hash = "sha256:ce005177f741409db4e4dd40a7431b76feb856b9dd69d57e0da49d6715bfd26d", size = 64439 }, ] [[package]] @@ -610,7 +610,7 @@ wheels = [ [[package]] name = "google-cloud-monitoring" -version = "2.28.0" +version = "2.29.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "google-api-core", extra = ["grpc"] }, @@ -619,14 +619,14 @@ dependencies = [ { name = "proto-plus" }, { name = "protobuf" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/bc/b8/7f68a7738cbfef610af532b2fc758e39d852fc93ed3a31bd0e76fd45d2fd/google_cloud_monitoring-2.28.0.tar.gz", hash = "sha256:25175590907e038add644b5b744941d221776342924637095a879973a7c0ac37", size = 393321 } +sdist = { url = "https://files.pythonhosted.org/packages/97/06/9fc0a34bed4221a68eef3e0373ae054de367dc42c0b689d5d917587ef61b/google_cloud_monitoring-2.29.1.tar.gz", hash = "sha256:86cac55cdd2608561819d19544fb3c129bbb7dcecc445d8de426e34cd6fa8e49", size = 404383 } wheels = [ - { url = "https://files.pythonhosted.org/packages/ad/d3/02dcf5376cb4b47b9c06eba36d80700d5b0a1510f3fcd47d3abbe4b0f0a3/google_cloud_monitoring-2.28.0-py3-none-any.whl", hash = "sha256:64f4c57cc465dd51cceffe559f0ec6fa9f96aa6d82790cd8d3af6d5cc3795160", size = 384670 }, + { url = "https://files.pythonhosted.org/packages/ac/97/7c27aa95eccf8b62b066295a7c4ad04284364b696d3e7d9d47152b255a24/google_cloud_monitoring-2.29.1-py3-none-any.whl", hash = "sha256:944a57031f20da38617d184d5658c1f938e019e8061f27fd944584831a1b9d5a", size = 387922 }, ] [[package]] name = "google-cloud-storage" -version = "3.7.0" +version = "3.9.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "google-api-core" }, @@ -636,9 +636,9 @@ dependencies = [ { name = "google-resumable-media" }, { name = "requests" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/d2/8e/fab2de1a0ab7fdbd452eaae5a9a5c933d0911c26b04efa0c76ddfd921259/google_cloud_storage-3.7.0.tar.gz", hash = "sha256:9ce59c65f4d6e372effcecc0456680a8d73cef4f2dc9212a0704799cb3d69237", size = 17258914 } +sdist = { url = "https://files.pythonhosted.org/packages/f7/b1/4f0798e88285b50dfc60ed3a7de071def538b358db2da468c2e0deecbb40/google_cloud_storage-3.9.0.tar.gz", hash = "sha256:f2d8ca7db2f652be757e92573b2196e10fbc09649b5c016f8b422ad593c641cc", size = 17298544 } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/2d/80/6e5c7c83cea15ed4dfc4843b9df9db0716bc551ac938f7b5dd18a72bd5e4/google_cloud_storage-3.7.0-py3-none-any.whl", hash = "sha256:469bc9540936e02f8a4bfd1619e9dca1e42dec48f95e4204d783b36476a15093", size = 303364 }, + { url = "https://files.pythonhosted.org/packages/46/0b/816a6ae3c9fd096937d2e5f9670558908811d57d59ddf69dd4b83b326fd1/google_cloud_storage-3.9.0-py3-none-any.whl", hash = "sha256:2dce75a9e8b3387078cbbdad44757d410ecdb916101f8ba308abf202b6968066", size = 321324 }, ] [[package]] @@ -934,7 +934,7 @@ wheels = [ [[package]] name = "moto" -version = "5.1.18" +version = "5.1.22" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "boto3" }, @@ -947,9 +947,9 @@ dependencies = [ { name = "werkzeug" }, { name = "xmltodict" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/e3/6a/a73bef67261bfab55714390f07c7df97531d00cea730b7c0ace4d0ad7669/moto-5.1.18.tar.gz", hash = "sha256:45298ef7b88561b839f6fe3e9da2a6e2ecd10283c7bf3daf43a07a97465885f9", size = 8271655 } +sdist = { url = "https://files.pythonhosted.org/packages/b2/3d/1765accbf753dc1ae52f26a2e2ed2881d78c2eb9322c178e45312472e4a0/moto-5.1.22.tar.gz", hash = "sha256:e5b2c378296e4da50ce5a3c355a1743c8d6d396ea41122f5bb2a40f9b9a8cc0e", size = 8547792 } wheels = [ - { url = "https://files.pythonhosted.org/packages/83/d4/6991df072b34741a0c115e8d21dc2fe142e4b497319d762e957f6677f001/moto-5.1.18-py3-none-any.whl", hash = "sha256:b65aa8fc9032c5c574415451e14fd7da4e43fd50b8bdcb5f10289ad382c25bcf", size = 6357278 }, + { url = "https://files.pythonhosted.org/packages/46/4f/8812a01e3e0bd6be3e13b90432fb5c696af9a720af3f00e6eba5ad748345/moto-5.1.22-py3-none-any.whl", hash = "sha256:d9f20ae3cf29c44f93c1f8f06c8f48d5560e5dc027816ef1d0d2059741ffcfbe", size = 6617400 }, ] [[package]] @@ -1114,123 +1114,123 @@ wheels = [ [[package]] name = "opentelemetry-api" -version = "1.39.1" +version = "1.40.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { 
name = "importlib-metadata" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/97/b9/3161be15bb8e3ad01be8be5a968a9237c3027c5be504362ff800fca3e442/opentelemetry_api-1.39.1.tar.gz", hash = "sha256:fbde8c80e1b937a2c61f20347e91c0c18a1940cecf012d62e65a7caf08967c9c", size = 65767 } +sdist = { url = "https://files.pythonhosted.org/packages/2c/1d/4049a9e8698361cc1a1aa03a6c59e4fa4c71e0c0f94a30f988a6876a2ae6/opentelemetry_api-1.40.0.tar.gz", hash = "sha256:159be641c0b04d11e9ecd576906462773eb97ae1b657730f0ecf64d32071569f", size = 70851 } wheels = [ - { url = "https://files.pythonhosted.org/packages/cf/df/d3f1ddf4bb4cb50ed9b1139cc7b1c54c34a1e7ce8fd1b9a37c0d1551a6bd/opentelemetry_api-1.39.1-py3-none-any.whl", hash = "sha256:2edd8463432a7f8443edce90972169b195e7d6a05500cd29e6d13898187c9950", size = 66356 }, + { url = "https://files.pythonhosted.org/packages/5f/bf/93795954016c522008da367da292adceed71cca6ee1717e1d64c83089099/opentelemetry_api-1.40.0-py3-none-any.whl", hash = "sha256:82dd69331ae74b06f6a874704be0cfaa49a1650e1537d4a813b86ecef7d0ecf9", size = 68676 }, ] [[package]] name = "opentelemetry-sdk" -version = "1.39.1" +version = "1.40.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "opentelemetry-api" }, { name = "opentelemetry-semantic-conventions" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/eb/fb/c76080c9ba07e1e8235d24cdcc4d125ef7aa3edf23eb4e497c2e50889adc/opentelemetry_sdk-1.39.1.tar.gz", hash = "sha256:cf4d4563caf7bff906c9f7967e2be22d0d6b349b908be0d90fb21c8e9c995cc6", size = 171460 } +sdist = { url = "https://files.pythonhosted.org/packages/58/fd/3c3125b20ba18ce2155ba9ea74acb0ae5d25f8cd39cfd37455601b7955cc/opentelemetry_sdk-1.40.0.tar.gz", hash = "sha256:18e9f5ec20d859d268c7cb3c5198c8d105d073714db3de50b593b8c1345a48f2", size = 184252 } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/7c/98/e91cf858f203d86f4eccdf763dcf01cf03f1dae80c3750f7e635bfa206b6/opentelemetry_sdk-1.39.1-py3-none-any.whl", hash = "sha256:4d5482c478513ecb0a5d938dcc61394e647066e0cc2676bee9f3af3f3f45f01c", size = 132565 }, + { url = "https://files.pythonhosted.org/packages/2c/c5/6a852903d8bfac758c6dc6e9a68b015d3c33f2f1be5e9591e0f4b69c7e0a/opentelemetry_sdk-1.40.0-py3-none-any.whl", hash = "sha256:787d2154a71f4b3d81f20524a8ce061b7db667d24e46753f32a7bc48f1c1f3f1", size = 141951 }, ] [[package]] name = "opentelemetry-semantic-conventions" -version = "0.60b1" +version = "0.61b0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "opentelemetry-api" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/91/df/553f93ed38bf22f4b999d9be9c185adb558982214f33eae539d3b5cd0858/opentelemetry_semantic_conventions-0.60b1.tar.gz", hash = "sha256:87c228b5a0669b748c76d76df6c364c369c28f1c465e50f661e39737e84bc953", size = 137935 } +sdist = { url = "https://files.pythonhosted.org/packages/6d/c0/4ae7973f3c2cfd2b6e321f1675626f0dab0a97027cc7a297474c9c8f3d04/opentelemetry_semantic_conventions-0.61b0.tar.gz", hash = "sha256:072f65473c5d7c6dc0355b27d6c9d1a679d63b6d4b4b16a9773062cb7e31192a", size = 145755 } wheels = [ - { url = "https://files.pythonhosted.org/packages/7a/5e/5958555e09635d09b75de3c4f8b9cae7335ca545d77392ffe7331534c402/opentelemetry_semantic_conventions-0.60b1-py3-none-any.whl", hash = "sha256:9fa8c8b0c110da289809292b0591220d3a7b53c1526a23021e977d68597893fb", size = 219982 }, + { url = "https://files.pythonhosted.org/packages/b2/37/cc6a55e448deaa9b27377d087da8615a3416d8ad523d5960b78dbeadd02a/opentelemetry_semantic_conventions-0.61b0-py3-none-any.whl", hash = "sha256:fa530a96be229795f8cef353739b618148b0fe2b4b3f005e60e262926c4d38e2", size = 231621 }, ] [[package]] name = "orjson" -version = "3.11.5" -source = { registry = "https://pypi.org/simple" } -sdist = { url = 
"https://files.pythonhosted.org/packages/04/b8/333fdb27840f3bf04022d21b654a35f58e15407183aeb16f3b41aa053446/orjson-3.11.5.tar.gz", hash = "sha256:82393ab47b4fe44ffd0a7659fa9cfaacc717eb617c93cde83795f14af5c2e9d5", size = 5972347 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/79/19/b22cf9dad4db20c8737041046054cbd4f38bb5a2d0e4bb60487832ce3d76/orjson-3.11.5-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:df9eadb2a6386d5ea2bfd81309c505e125cfc9ba2b1b99a97e60985b0b3665d1", size = 245719 }, - { url = "https://files.pythonhosted.org/packages/03/2e/b136dd6bf30ef5143fbe76a4c142828b55ccc618be490201e9073ad954a1/orjson-3.11.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ccc70da619744467d8f1f49a8cadae5ec7bbe054e5232d95f92ed8737f8c5870", size = 132467 }, - { url = "https://files.pythonhosted.org/packages/ae/fc/ae99bfc1e1887d20a0268f0e2686eb5b13d0ea7bbe01de2b566febcd2130/orjson-3.11.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:073aab025294c2f6fc0807201c76fdaed86f8fc4be52c440fb78fbb759a1ac09", size = 130702 }, - { url = "https://files.pythonhosted.org/packages/6e/43/ef7912144097765997170aca59249725c3ab8ef6079f93f9d708dd058df5/orjson-3.11.5-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:835f26fa24ba0bb8c53ae2a9328d1706135b74ec653ed933869b74b6909e63fd", size = 135907 }, - { url = "https://files.pythonhosted.org/packages/3f/da/24d50e2d7f4092ddd4d784e37a3fa41f22ce8ed97abc9edd222901a96e74/orjson-3.11.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:667c132f1f3651c14522a119e4dd631fad98761fa960c55e8e7430bb2a1ba4ac", size = 139935 }, - { url = "https://files.pythonhosted.org/packages/02/4a/b4cb6fcbfff5b95a3a019a8648255a0fac9b221fbf6b6e72be8df2361feb/orjson-3.11.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:42e8961196af655bb5e63ce6c60d25e8798cd4dfbc04f4203457fa3869322c2e", size 
= 137541 }, - { url = "https://files.pythonhosted.org/packages/a5/99/a11bd129f18c2377c27b2846a9d9be04acec981f770d711ba0aaea563984/orjson-3.11.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75412ca06e20904c19170f8a24486c4e6c7887dea591ba18a1ab572f1300ee9f", size = 139031 }, - { url = "https://files.pythonhosted.org/packages/64/29/d7b77d7911574733a036bb3e8ad7053ceb2b7d6ea42208b9dbc55b23b9ed/orjson-3.11.5-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6af8680328c69e15324b5af3ae38abbfcf9cbec37b5346ebfd52339c3d7e8a18", size = 141622 }, - { url = "https://files.pythonhosted.org/packages/93/41/332db96c1de76b2feda4f453e91c27202cd092835936ce2b70828212f726/orjson-3.11.5-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:a86fe4ff4ea523eac8f4b57fdac319faf037d3c1be12405e6a7e86b3fbc4756a", size = 413800 }, - { url = "https://files.pythonhosted.org/packages/76/e1/5a0d148dd1f89ad2f9651df67835b209ab7fcb1118658cf353425d7563e9/orjson-3.11.5-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e607b49b1a106ee2086633167033afbd63f76f2999e9236f638b06b112b24ea7", size = 151198 }, - { url = "https://files.pythonhosted.org/packages/0d/96/8db67430d317a01ae5cf7971914f6775affdcfe99f5bff9ef3da32492ecc/orjson-3.11.5-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7339f41c244d0eea251637727f016b3d20050636695bc78345cce9029b189401", size = 141984 }, - { url = "https://files.pythonhosted.org/packages/71/49/40d21e1aa1ac569e521069228bb29c9b5a350344ccf922a0227d93c2ed44/orjson-3.11.5-cp310-cp310-win32.whl", hash = "sha256:8be318da8413cdbbce77b8c5fac8d13f6eb0f0db41b30bb598631412619572e8", size = 135272 }, - { url = "https://files.pythonhosted.org/packages/c4/7e/d0e31e78be0c100e08be64f48d2850b23bcb4d4c70d114f4e43b39f6895a/orjson-3.11.5-cp310-cp310-win_amd64.whl", hash = "sha256:b9f86d69ae822cabc2a0f6c099b43e8733dda788405cba2665595b7e8dd8d167", size = 133360 }, - { url = 
"https://files.pythonhosted.org/packages/fd/68/6b3659daec3a81aed5ab47700adb1a577c76a5452d35b91c88efee89987f/orjson-3.11.5-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:9c8494625ad60a923af6b2b0bd74107146efe9b55099e20d7740d995f338fcd8", size = 245318 }, - { url = "https://files.pythonhosted.org/packages/e9/00/92db122261425f61803ccf0830699ea5567439d966cbc35856fe711bfe6b/orjson-3.11.5-cp311-cp311-macosx_15_0_arm64.whl", hash = "sha256:7bb2ce0b82bc9fd1168a513ddae7a857994b780b2945a8c51db4ab1c4b751ebc", size = 129491 }, - { url = "https://files.pythonhosted.org/packages/94/4f/ffdcb18356518809d944e1e1f77589845c278a1ebbb5a8297dfefcc4b4cb/orjson-3.11.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67394d3becd50b954c4ecd24ac90b5051ee7c903d167459f93e77fc6f5b4c968", size = 132167 }, - { url = "https://files.pythonhosted.org/packages/97/c6/0a8caff96f4503f4f7dd44e40e90f4d14acf80d3b7a97cb88747bb712d3e/orjson-3.11.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:298d2451f375e5f17b897794bcc3e7b821c0f32b4788b9bcae47ada24d7f3cf7", size = 130516 }, - { url = "https://files.pythonhosted.org/packages/4d/63/43d4dc9bd9954bff7052f700fdb501067f6fb134a003ddcea2a0bb3854ed/orjson-3.11.5-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aa5e4244063db8e1d87e0f54c3f7522f14b2dc937e65d5241ef0076a096409fd", size = 135695 }, - { url = "https://files.pythonhosted.org/packages/87/6f/27e2e76d110919cb7fcb72b26166ee676480a701bcf8fc53ac5d0edce32f/orjson-3.11.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1db2088b490761976c1b2e956d5d4e6409f3732e9d79cfa69f876c5248d1baf9", size = 139664 }, - { url = "https://files.pythonhosted.org/packages/d4/f8/5966153a5f1be49b5fbb8ca619a529fde7bc71aa0a376f2bb83fed248bcd/orjson-3.11.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:c2ed66358f32c24e10ceea518e16eb3549e34f33a9d51f99ce23b0251776a1ef", size = 137289 }, - { url = "https://files.pythonhosted.org/packages/a7/34/8acb12ff0299385c8bbcbb19fbe40030f23f15a6de57a9c587ebf71483fb/orjson-3.11.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c2021afda46c1ed64d74b555065dbd4c2558d510d8cec5ea6a53001b3e5e82a9", size = 138784 }, - { url = "https://files.pythonhosted.org/packages/ee/27/910421ea6e34a527f73d8f4ee7bdffa48357ff79c7b8d6eb6f7b82dd1176/orjson-3.11.5-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b42ffbed9128e547a1647a3e50bc88ab28ae9daa61713962e0d3dd35e820c125", size = 141322 }, - { url = "https://files.pythonhosted.org/packages/87/a3/4b703edd1a05555d4bb1753d6ce44e1a05b7a6d7c164d5b332c795c63d70/orjson-3.11.5-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:8d5f16195bb671a5dd3d1dbea758918bada8f6cc27de72bd64adfbd748770814", size = 413612 }, - { url = "https://files.pythonhosted.org/packages/1b/36/034177f11d7eeea16d3d2c42a1883b0373978e08bc9dad387f5074c786d8/orjson-3.11.5-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:c0e5d9f7a0227df2927d343a6e3859bebf9208b427c79bd31949abcc2fa32fa5", size = 150993 }, - { url = "https://files.pythonhosted.org/packages/44/2f/ea8b24ee046a50a7d141c0227c4496b1180b215e728e3b640684f0ea448d/orjson-3.11.5-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:23d04c4543e78f724c4dfe656b3791b5f98e4c9253e13b2636f1af5d90e4a880", size = 141774 }, - { url = "https://files.pythonhosted.org/packages/8a/12/cc440554bf8200eb23348a5744a575a342497b65261cd65ef3b28332510a/orjson-3.11.5-cp311-cp311-win32.whl", hash = "sha256:c404603df4865f8e0afe981aa3c4b62b406e6d06049564d58934860b62b7f91d", size = 135109 }, - { url = "https://files.pythonhosted.org/packages/a3/83/e0c5aa06ba73a6760134b169f11fb970caa1525fa4461f94d76e692299d9/orjson-3.11.5-cp311-cp311-win_amd64.whl", hash = "sha256:9645ef655735a74da4990c24ffbd6894828fbfa117bc97c1edd98c282ecb52e1", size = 133193 }, - { url = 
"https://files.pythonhosted.org/packages/cb/35/5b77eaebc60d735e832c5b1a20b155667645d123f09d471db0a78280fb49/orjson-3.11.5-cp311-cp311-win_arm64.whl", hash = "sha256:1cbf2735722623fcdee8e712cbaaab9e372bbcb0c7924ad711b261c2eccf4a5c", size = 126830 }, - { url = "https://files.pythonhosted.org/packages/ef/a4/8052a029029b096a78955eadd68ab594ce2197e24ec50e6b6d2ab3f4e33b/orjson-3.11.5-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:334e5b4bff9ad101237c2d799d9fd45737752929753bf4faf4b207335a416b7d", size = 245347 }, - { url = "https://files.pythonhosted.org/packages/64/67/574a7732bd9d9d79ac620c8790b4cfe0717a3d5a6eb2b539e6e8995e24a0/orjson-3.11.5-cp312-cp312-macosx_15_0_arm64.whl", hash = "sha256:ff770589960a86eae279f5d8aa536196ebda8273a2a07db2a54e82b93bc86626", size = 129435 }, - { url = "https://files.pythonhosted.org/packages/52/8d/544e77d7a29d90cf4d9eecd0ae801c688e7f3d1adfa2ebae5e1e94d38ab9/orjson-3.11.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed24250e55efbcb0b35bed7caaec8cedf858ab2f9f2201f17b8938c618c8ca6f", size = 132074 }, - { url = "https://files.pythonhosted.org/packages/6e/57/b9f5b5b6fbff9c26f77e785baf56ae8460ef74acdb3eae4931c25b8f5ba9/orjson-3.11.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a66d7769e98a08a12a139049aac2f0ca3adae989817f8c43337455fbc7669b85", size = 130520 }, - { url = "https://files.pythonhosted.org/packages/f6/6d/d34970bf9eb33f9ec7c979a262cad86076814859e54eb9a059a52f6dc13d/orjson-3.11.5-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:86cfc555bfd5794d24c6a1903e558b50644e5e68e6471d66502ce5cb5fdef3f9", size = 136209 }, - { url = "https://files.pythonhosted.org/packages/e7/39/bc373b63cc0e117a105ea12e57280f83ae52fdee426890d57412432d63b3/orjson-3.11.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a230065027bc2a025e944f9d4714976a81e7ecfa940923283bca7bbc1f10f626", size = 139837 }, - { 
url = "https://files.pythonhosted.org/packages/cb/aa/7c4818c8d7d324da220f4f1af55c343956003aa4d1ce1857bdc1d396ba69/orjson-3.11.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b29d36b60e606df01959c4b982729c8845c69d1963f88686608be9ced96dbfaa", size = 137307 }, - { url = "https://files.pythonhosted.org/packages/46/bf/0993b5a056759ba65145effe3a79dd5a939d4a070eaa5da2ee3180fbb13f/orjson-3.11.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c74099c6b230d4261fdc3169d50efc09abf38ace1a42ea2f9994b1d79153d477", size = 139020 }, - { url = "https://files.pythonhosted.org/packages/65/e8/83a6c95db3039e504eda60fc388f9faedbb4f6472f5aba7084e06552d9aa/orjson-3.11.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e697d06ad57dd0c7a737771d470eedc18e68dfdefcdd3b7de7f33dfda5b6212e", size = 141099 }, - { url = "https://files.pythonhosted.org/packages/b9/b4/24fdc024abfce31c2f6812973b0a693688037ece5dc64b7a60c1ce69e2f2/orjson-3.11.5-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:e08ca8a6c851e95aaecc32bc44a5aa75d0ad26af8cdac7c77e4ed93acf3d5b69", size = 413540 }, - { url = "https://files.pythonhosted.org/packages/d9/37/01c0ec95d55ed0c11e4cae3e10427e479bba40c77312b63e1f9665e0737d/orjson-3.11.5-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e8b5f96c05fce7d0218df3fdfeb962d6b8cfff7e3e20264306b46dd8b217c0f3", size = 151530 }, - { url = "https://files.pythonhosted.org/packages/f9/d4/f9ebc57182705bb4bbe63f5bbe14af43722a2533135e1d2fb7affa0c355d/orjson-3.11.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ddbfdb5099b3e6ba6d6ea818f61997bb66de14b411357d24c4612cf1ebad08ca", size = 141863 }, - { url = "https://files.pythonhosted.org/packages/0d/04/02102b8d19fdcb009d72d622bb5781e8f3fae1646bf3e18c53d1bc8115b5/orjson-3.11.5-cp312-cp312-win32.whl", hash = "sha256:9172578c4eb09dbfcf1657d43198de59b6cef4054de385365060ed50c458ac98", size = 135255 }, - { url = 
"https://files.pythonhosted.org/packages/d4/fb/f05646c43d5450492cb387de5549f6de90a71001682c17882d9f66476af5/orjson-3.11.5-cp312-cp312-win_amd64.whl", hash = "sha256:2b91126e7b470ff2e75746f6f6ee32b9ab67b7a93c8ba1d15d3a0caaf16ec875", size = 133252 }, - { url = "https://files.pythonhosted.org/packages/dc/a6/7b8c0b26ba18c793533ac1cd145e131e46fcf43952aa94c109b5b913c1f0/orjson-3.11.5-cp312-cp312-win_arm64.whl", hash = "sha256:acbc5fac7e06777555b0722b8ad5f574739e99ffe99467ed63da98f97f9ca0fe", size = 126777 }, - { url = "https://files.pythonhosted.org/packages/10/43/61a77040ce59f1569edf38f0b9faadc90c8cf7e9bec2e0df51d0132c6bb7/orjson-3.11.5-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:3b01799262081a4c47c035dd77c1301d40f568f77cc7ec1bb7db5d63b0a01629", size = 245271 }, - { url = "https://files.pythonhosted.org/packages/55/f9/0f79be617388227866d50edd2fd320cb8fb94dc1501184bb1620981a0aba/orjson-3.11.5-cp313-cp313-macosx_15_0_arm64.whl", hash = "sha256:61de247948108484779f57a9f406e4c84d636fa5a59e411e6352484985e8a7c3", size = 129422 }, - { url = "https://files.pythonhosted.org/packages/77/42/f1bf1549b432d4a78bfa95735b79b5dac75b65b5bb815bba86ad406ead0a/orjson-3.11.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:894aea2e63d4f24a7f04a1908307c738d0dce992e9249e744b8f4e8dd9197f39", size = 132060 }, - { url = "https://files.pythonhosted.org/packages/25/49/825aa6b929f1a6ed244c78acd7b22c1481fd7e5fda047dc8bf4c1a807eb6/orjson-3.11.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ddc21521598dbe369d83d4d40338e23d4101dad21dae0e79fa20465dbace019f", size = 130391 }, - { url = "https://files.pythonhosted.org/packages/42/ec/de55391858b49e16e1aa8f0bbbb7e5997b7345d8e984a2dec3746d13065b/orjson-3.11.5-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7cce16ae2f5fb2c53c3eafdd1706cb7b6530a67cc1c17abe8ec747f5cd7c0c51", size = 135964 }, - { url = 
"https://files.pythonhosted.org/packages/1c/40/820bc63121d2d28818556a2d0a09384a9f0262407cf9fa305e091a8048df/orjson-3.11.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e46c762d9f0e1cfb4ccc8515de7f349abbc95b59cb5a2bd68df5973fdef913f8", size = 139817 }, - { url = "https://files.pythonhosted.org/packages/09/c7/3a445ca9a84a0d59d26365fd8898ff52bdfcdcb825bcc6519830371d2364/orjson-3.11.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d7345c759276b798ccd6d77a87136029e71e66a8bbf2d2755cbdde1d82e78706", size = 137336 }, - { url = "https://files.pythonhosted.org/packages/9a/b3/dc0d3771f2e5d1f13368f56b339c6782f955c6a20b50465a91acb79fe961/orjson-3.11.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75bc2e59e6a2ac1dd28901d07115abdebc4563b5b07dd612bf64260a201b1c7f", size = 138993 }, - { url = "https://files.pythonhosted.org/packages/d1/a2/65267e959de6abe23444659b6e19c888f242bf7725ff927e2292776f6b89/orjson-3.11.5-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:54aae9b654554c3b4edd61896b978568c6daa16af96fa4681c9b5babd469f863", size = 141070 }, - { url = "https://files.pythonhosted.org/packages/63/c9/da44a321b288727a322c6ab17e1754195708786a04f4f9d2220a5076a649/orjson-3.11.5-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:4bdd8d164a871c4ec773f9de0f6fe8769c2d6727879c37a9666ba4183b7f8228", size = 413505 }, - { url = "https://files.pythonhosted.org/packages/7f/17/68dc14fa7000eefb3d4d6d7326a190c99bb65e319f02747ef3ebf2452f12/orjson-3.11.5-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:a261fef929bcf98a60713bf5e95ad067cea16ae345d9a35034e73c3990e927d2", size = 151342 }, - { url = "https://files.pythonhosted.org/packages/c4/c5/ccee774b67225bed630a57478529fc026eda33d94fe4c0eac8fe58d4aa52/orjson-3.11.5-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c028a394c766693c5c9909dec76b24f37e6a1b91999e8d0c0d5feecbe93c3e05", size = 141823 }, - { url = 
"https://files.pythonhosted.org/packages/67/80/5d00e4155d0cd7390ae2087130637671da713959bb558db9bac5e6f6b042/orjson-3.11.5-cp313-cp313-win32.whl", hash = "sha256:2cc79aaad1dfabe1bd2d50ee09814a1253164b3da4c00a78c458d82d04b3bdef", size = 135236 }, - { url = "https://files.pythonhosted.org/packages/95/fe/792cc06a84808dbdc20ac6eab6811c53091b42f8e51ecebf14b540e9cfe4/orjson-3.11.5-cp313-cp313-win_amd64.whl", hash = "sha256:ff7877d376add4e16b274e35a3f58b7f37b362abf4aa31863dadacdd20e3a583", size = 133167 }, - { url = "https://files.pythonhosted.org/packages/46/2c/d158bd8b50e3b1cfdcf406a7e463f6ffe3f0d167b99634717acdaf5e299f/orjson-3.11.5-cp313-cp313-win_arm64.whl", hash = "sha256:59ac72ea775c88b163ba8d21b0177628bd015c5dd060647bbab6e22da3aad287", size = 126712 }, - { url = "https://files.pythonhosted.org/packages/c2/60/77d7b839e317ead7bb225d55bb50f7ea75f47afc489c81199befc5435b50/orjson-3.11.5-cp314-cp314-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:e446a8ea0a4c366ceafc7d97067bfd55292969143b57e3c846d87fc701e797a0", size = 245252 }, - { url = "https://files.pythonhosted.org/packages/f1/aa/d4639163b400f8044cef0fb9aa51b0337be0da3a27187a20d1166e742370/orjson-3.11.5-cp314-cp314-macosx_15_0_arm64.whl", hash = "sha256:53deb5addae9c22bbe3739298f5f2196afa881ea75944e7720681c7080909a81", size = 129419 }, - { url = "https://files.pythonhosted.org/packages/30/94/9eabf94f2e11c671111139edf5ec410d2f21e6feee717804f7e8872d883f/orjson-3.11.5-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82cd00d49d6063d2b8791da5d4f9d20539c5951f965e45ccf4e96d33505ce68f", size = 132050 }, - { url = "https://files.pythonhosted.org/packages/3d/c8/ca10f5c5322f341ea9a9f1097e140be17a88f88d1cfdd29df522970d9744/orjson-3.11.5-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3fd15f9fc8c203aeceff4fda211157fad114dde66e92e24097b3647a08f4ee9e", size = 130370 }, - { url = 
"https://files.pythonhosted.org/packages/25/d4/e96824476d361ee2edd5c6290ceb8d7edf88d81148a6ce172fc00278ca7f/orjson-3.11.5-cp314-cp314-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9df95000fbe6777bf9820ae82ab7578e8662051bb5f83d71a28992f539d2cda7", size = 136012 }, - { url = "https://files.pythonhosted.org/packages/85/8e/9bc3423308c425c588903f2d103cfcfe2539e07a25d6522900645a6f257f/orjson-3.11.5-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:92a8d676748fca47ade5bc3da7430ed7767afe51b2f8100e3cd65e151c0eaceb", size = 139809 }, - { url = "https://files.pythonhosted.org/packages/e9/3c/b404e94e0b02a232b957c54643ce68d0268dacb67ac33ffdee24008c8b27/orjson-3.11.5-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aa0f513be38b40234c77975e68805506cad5d57b3dfd8fe3baa7f4f4051e15b4", size = 137332 }, - { url = "https://files.pythonhosted.org/packages/51/30/cc2d69d5ce0ad9b84811cdf4a0cd5362ac27205a921da524ff42f26d65e0/orjson-3.11.5-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa1863e75b92891f553b7922ce4ee10ed06db061e104f2b7815de80cdcb135ad", size = 138983 }, - { url = "https://files.pythonhosted.org/packages/0e/87/de3223944a3e297d4707d2fe3b1ffb71437550e165eaf0ca8bbe43ccbcb1/orjson-3.11.5-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:d4be86b58e9ea262617b8ca6251a2f0d63cc132a6da4b5fcc8e0a4128782c829", size = 141069 }, - { url = "https://files.pythonhosted.org/packages/65/30/81d5087ae74be33bcae3ff2d80f5ccaa4a8fedc6d39bf65a427a95b8977f/orjson-3.11.5-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:b923c1c13fa02084eb38c9c065afd860a5cff58026813319a06949c3af5732ac", size = 413491 }, - { url = "https://files.pythonhosted.org/packages/d0/6f/f6058c21e2fc1efaf918986dbc2da5cd38044f1a2d4b7b91ad17c4acf786/orjson-3.11.5-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:1b6bd351202b2cd987f35a13b5e16471cf4d952b42a73c391cc537974c43ef6d", size = 151375 }, - { url = 
"https://files.pythonhosted.org/packages/54/92/c6921f17d45e110892899a7a563a925b2273d929959ce2ad89e2525b885b/orjson-3.11.5-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:bb150d529637d541e6af06bbe3d02f5498d628b7f98267ff87647584293ab439", size = 141850 }, - { url = "https://files.pythonhosted.org/packages/88/86/cdecb0140a05e1a477b81f24739da93b25070ee01ce7f7242f44a6437594/orjson-3.11.5-cp314-cp314-win32.whl", hash = "sha256:9cc1e55c884921434a84a0c3dd2699eb9f92e7b441d7f53f3941079ec6ce7499", size = 135278 }, - { url = "https://files.pythonhosted.org/packages/e4/97/b638d69b1e947d24f6109216997e38922d54dcdcdb1b11c18d7efd2d3c59/orjson-3.11.5-cp314-cp314-win_amd64.whl", hash = "sha256:a4f3cb2d874e03bc7767c8f88adaa1a9a05cecea3712649c3b58589ec7317310", size = 133170 }, - { url = "https://files.pythonhosted.org/packages/8f/dd/f4fff4a6fe601b4f8f3ba3aa6da8ac33d17d124491a3b804c662a70e1636/orjson-3.11.5-cp314-cp314-win_arm64.whl", hash = "sha256:38b22f476c351f9a1c43e5b07d8b5a02eb24a6ab8e75f700f7d479d4568346a5", size = 126713 }, +version = "3.11.7" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/53/45/b268004f745ede84e5798b48ee12b05129d19235d0e15267aa57dcdb400b/orjson-3.11.7.tar.gz", hash = "sha256:9b1a67243945819ce55d24a30b59d6a168e86220452d2c96f4d1f093e71c0c49", size = 6144992 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/de/1a/a373746fa6d0e116dd9e54371a7b54622c44d12296d5d0f3ad5e3ff33490/orjson-3.11.7-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:a02c833f38f36546ba65a452127633afce4cf0dd7296b753d3bb54e55e5c0174", size = 229140 }, + { url = "https://files.pythonhosted.org/packages/52/a2/fa129e749d500f9b183e8a3446a193818a25f60261e9ce143ad61e975208/orjson-3.11.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b63c6e6738d7c3470ad01601e23376aa511e50e1f3931395b9f9c722406d1a67", size = 128670 }, + { url = 
"https://files.pythonhosted.org/packages/08/93/1e82011cd1e0bd051ef9d35bed1aa7fb4ea1f0a055dc2c841b46b43a9ebd/orjson-3.11.7-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:043d3006b7d32c7e233b8cfb1f01c651013ea079e08dcef7189a29abd8befe11", size = 123832 }, + { url = "https://files.pythonhosted.org/packages/fe/d8/a26b431ef962c7d55736674dddade876822f3e33223c1f47a36879350d04/orjson-3.11.7-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57036b27ac8a25d81112eb0cc9835cd4833c5b16e1467816adc0015f59e870dc", size = 129171 }, + { url = "https://files.pythonhosted.org/packages/a7/19/f47819b84a580f490da260c3ee9ade214cf4cf78ac9ce8c1c758f80fdfc9/orjson-3.11.7-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:733ae23ada68b804b222c44affed76b39e30806d38660bf1eb200520d259cc16", size = 141967 }, + { url = "https://files.pythonhosted.org/packages/5b/cd/37ece39a0777ba077fdcdbe4cccae3be8ed00290c14bf8afdc548befc260/orjson-3.11.7-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5fdfad2093bdd08245f2e204d977facd5f871c88c4a71230d5bcbd0e43bf6222", size = 130991 }, + { url = "https://files.pythonhosted.org/packages/8f/ed/f2b5d66aa9b6b5c02ff5f120efc7b38c7c4962b21e6be0f00fd99a5c348e/orjson-3.11.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cededd6738e1c153530793998e31c05086582b08315db48ab66649768f326baa", size = 133674 }, + { url = "https://files.pythonhosted.org/packages/c4/6e/baa83e68d1aa09fa8c3e5b2c087d01d0a0bd45256de719ed7bc22c07052d/orjson-3.11.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:14f440c7268c8f8633d1b3d443a434bd70cb15686117ea6beff8fdc8f5917a1e", size = 138722 }, + { url = "https://files.pythonhosted.org/packages/0c/47/7f8ef4963b772cd56999b535e553f7eb5cd27e9dd6c049baee6f18bfa05d/orjson-3.11.7-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:3a2479753bbb95b0ebcf7969f562cdb9668e6d12416a35b0dda79febf89cdea2", size = 409056 }, + { url = 
"https://files.pythonhosted.org/packages/38/eb/2df104dd2244b3618f25325a656f85cc3277f74bbd91224752410a78f3c7/orjson-3.11.7-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:71924496986275a737f38e3f22b4e0878882b3f7a310d2ff4dc96e812789120c", size = 144196 }, + { url = "https://files.pythonhosted.org/packages/b6/2a/ee41de0aa3a6686598661eae2b4ebdff1340c65bfb17fcff8b87138aab21/orjson-3.11.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b4a9eefdc70bf8bf9857f0290f973dec534ac84c35cd6a7f4083be43e7170a8f", size = 134979 }, + { url = "https://files.pythonhosted.org/packages/4c/fa/92fc5d3d402b87a8b28277a9ed35386218a6a5287c7fe5ee9b9f02c53fb2/orjson-3.11.7-cp310-cp310-win32.whl", hash = "sha256:ae9e0b37a834cef7ce8f99de6498f8fad4a2c0bf6bfc3d02abd8ed56aa15b2de", size = 127968 }, + { url = "https://files.pythonhosted.org/packages/07/29/a576bf36d73d60df06904d3844a9df08e25d59eba64363aaf8ec2f9bff41/orjson-3.11.7-cp310-cp310-win_amd64.whl", hash = "sha256:d772afdb22555f0c58cfc741bdae44180122b3616faa1ecadb595cd526e4c993", size = 125128 }, + { url = "https://files.pythonhosted.org/packages/37/02/da6cb01fc6087048d7f61522c327edf4250f1683a58a839fdcc435746dd5/orjson-3.11.7-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:9487abc2c2086e7c8eb9a211d2ce8855bae0e92586279d0d27b341d5ad76c85c", size = 228664 }, + { url = "https://files.pythonhosted.org/packages/c1/c2/5885e7a5881dba9a9af51bc564e8967225a642b3e03d089289a35054e749/orjson-3.11.7-cp311-cp311-macosx_15_0_arm64.whl", hash = "sha256:79cacb0b52f6004caf92405a7e1f11e6e2de8bdf9019e4f76b44ba045125cd6b", size = 125344 }, + { url = "https://files.pythonhosted.org/packages/a4/1d/4e7688de0a92d1caf600dfd5fb70b4c5bfff51dfa61ac555072ef2d0d32a/orjson-3.11.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c2e85fe4698b6a56d5e2ebf7ae87544d668eb6bde1ad1226c13f44663f20ec9e", size = 128404 }, + { url = 
"https://files.pythonhosted.org/packages/2f/b2/ec04b74ae03a125db7bd69cffd014b227b7f341e3261bf75b5eb88a1aa92/orjson-3.11.7-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b8d14b71c0b12963fe8a62aac87119f1afdf4cb88a400f61ca5ae581449efcb5", size = 123677 }, + { url = "https://files.pythonhosted.org/packages/4c/69/f95bdf960605f08f827f6e3291fe243d8aa9c5c9ff017a8d7232209184c3/orjson-3.11.7-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:91c81ef070c8f3220054115e1ef468b1c9ce8497b4e526cb9f68ab4dc0a7ac62", size = 128950 }, + { url = "https://files.pythonhosted.org/packages/a4/1b/de59c57bae1d148ef298852abd31909ac3089cff370dfd4cd84cc99cbc42/orjson-3.11.7-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:411ebaf34d735e25e358a6d9e7978954a9c9d58cfb47bc6683cdc3964cd2f910", size = 141756 }, + { url = "https://files.pythonhosted.org/packages/ee/9e/9decc59f4499f695f65c650f6cfa6cd4c37a3fbe8fa235a0a3614cb54386/orjson-3.11.7-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a16bcd08ab0bcdfc7e8801d9c4a9cc17e58418e4d48ddc6ded4e9e4b1a94062b", size = 130812 }, + { url = "https://files.pythonhosted.org/packages/28/e6/59f932bcabd1eac44e334fe8e3281a92eacfcb450586e1f4bde0423728d8/orjson-3.11.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c0b51672e466fd7e56230ffbae7f1639e18d0ce023351fb75da21b71bc2c960", size = 133444 }, + { url = "https://files.pythonhosted.org/packages/f1/36/b0f05c0eaa7ca30bc965e37e6a2956b0d67adb87a9872942d3568da846ae/orjson-3.11.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:136dcd6a2e796dfd9ffca9fc027d778567b0b7c9968d092842d3c323cef88aa8", size = 138609 }, + { url = "https://files.pythonhosted.org/packages/b8/03/58ec7d302b8d86944c60c7b4b82975d5161fcce4c9bc8c6cb1d6741b6115/orjson-3.11.7-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:7ba61079379b0ae29e117db13bda5f28d939766e410d321ec1624afc6a0b0504", size = 408918 }, + { url = 
"https://files.pythonhosted.org/packages/06/3a/868d65ef9a8b99be723bd510de491349618abd9f62c826cf206d962db295/orjson-3.11.7-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:0527a4510c300e3b406591b0ba69b5dc50031895b0a93743526a3fc45f59d26e", size = 143998 }, + { url = "https://files.pythonhosted.org/packages/5b/c7/1e18e1c83afe3349f4f6dc9e14910f0ae5f82eac756d1412ea4018938535/orjson-3.11.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a709e881723c9b18acddcfb8ba357322491ad553e277cf467e1e7e20e2d90561", size = 134802 }, + { url = "https://files.pythonhosted.org/packages/d4/0b/ccb7ee1a65b37e8eeb8b267dc953561d72370e85185e459616d4345bab34/orjson-3.11.7-cp311-cp311-win32.whl", hash = "sha256:c43b8b5bab288b6b90dac410cca7e986a4fa747a2e8f94615aea407da706980d", size = 127828 }, + { url = "https://files.pythonhosted.org/packages/af/9e/55c776dffda3f381e0f07d010a4f5f3902bf48eaba1bb7684d301acd4924/orjson-3.11.7-cp311-cp311-win_amd64.whl", hash = "sha256:6543001328aa857187f905308a028935864aefe9968af3848401b6fe80dbb471", size = 124941 }, + { url = "https://files.pythonhosted.org/packages/aa/8e/424a620fa7d263b880162505fb107ef5e0afaa765b5b06a88312ac291560/orjson-3.11.7-cp311-cp311-win_arm64.whl", hash = "sha256:1ee5cc7160a821dfe14f130bc8e63e7611051f964b463d9e2a3a573204446a4d", size = 126245 }, + { url = "https://files.pythonhosted.org/packages/80/bf/76f4f1665f6983385938f0e2a5d7efa12a58171b8456c252f3bae8a4cf75/orjson-3.11.7-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:bd03ea7606833655048dab1a00734a2875e3e86c276e1d772b2a02556f0d895f", size = 228545 }, + { url = "https://files.pythonhosted.org/packages/79/53/6c72c002cb13b5a978a068add59b25a8bdf2800ac1c9c8ecdb26d6d97064/orjson-3.11.7-cp312-cp312-macosx_15_0_arm64.whl", hash = "sha256:89e440ebc74ce8ab5c7bc4ce6757b4a6b1041becb127df818f6997b5c71aa60b", size = 125224 }, + { url = 
"https://files.pythonhosted.org/packages/2c/83/10e48852865e5dd151bdfe652c06f7da484578ed02c5fca938e3632cb0b8/orjson-3.11.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ede977b5fe5ac91b1dffc0a517ca4542d2ec8a6a4ff7b2652d94f640796342a", size = 128154 }, + { url = "https://files.pythonhosted.org/packages/6e/52/a66e22a2b9abaa374b4a081d410edab6d1e30024707b87eab7c734afe28d/orjson-3.11.7-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b7b1dae39230a393df353827c855a5f176271c23434cfd2db74e0e424e693e10", size = 123548 }, + { url = "https://files.pythonhosted.org/packages/de/38/605d371417021359f4910c496f764c48ceb8997605f8c25bf1dfe58c0ebe/orjson-3.11.7-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ed46f17096e28fb28d2975834836a639af7278aa87c84f68ab08fbe5b8bd75fa", size = 129000 }, + { url = "https://files.pythonhosted.org/packages/44/98/af32e842b0ffd2335c89714d48ca4e3917b42f5d6ee5537832e069a4b3ac/orjson-3.11.7-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3726be79e36e526e3d9c1aceaadbfb4a04ee80a72ab47b3f3c17fefb9812e7b8", size = 141686 }, + { url = "https://files.pythonhosted.org/packages/96/0b/fc793858dfa54be6feee940c1463370ece34b3c39c1ca0aa3845f5ba9892/orjson-3.11.7-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0724e265bc548af1dedebd9cb3d24b4e1c1e685a343be43e87ba922a5c5fff2f", size = 130812 }, + { url = "https://files.pythonhosted.org/packages/dc/91/98a52415059db3f374757d0b7f0f16e3b5cd5976c90d1c2b56acaea039e6/orjson-3.11.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7745312efa9e11c17fbd3cb3097262d079da26930ae9ae7ba28fb738367cbad", size = 133440 }, + { url = "https://files.pythonhosted.org/packages/dc/b6/cb540117bda61791f46381f8c26c8f93e802892830a6055748d3bb1925ab/orjson-3.11.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f904c24bdeabd4298f7a977ef14ca2a022ca921ed670b92ecd16ab6f3d01f867", size 
= 138386 }, + { url = "https://files.pythonhosted.org/packages/63/1a/50a3201c334a7f17c231eee5f841342190723794e3b06293f26e7cf87d31/orjson-3.11.7-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:b9fc4d0f81f394689e0814617aadc4f2ea0e8025f38c226cbf22d3b5ddbf025d", size = 408853 }, + { url = "https://files.pythonhosted.org/packages/87/cd/8de1c67d0be44fdc22701e5989c0d015a2adf391498ad42c4dc589cd3013/orjson-3.11.7-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:849e38203e5be40b776ed2718e587faf204d184fc9a008ae441f9442320c0cab", size = 144130 }, + { url = "https://files.pythonhosted.org/packages/0f/fe/d605d700c35dd55f51710d159fc54516a280923cd1b7e47508982fbb387d/orjson-3.11.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:4682d1db3bcebd2b64757e0ddf9e87ae5f00d29d16c5cdf3a62f561d08cc3dd2", size = 134818 }, + { url = "https://files.pythonhosted.org/packages/e4/e4/15ecc67edb3ddb3e2f46ae04475f2d294e8b60c1825fbe28a428b93b3fbd/orjson-3.11.7-cp312-cp312-win32.whl", hash = "sha256:f4f7c956b5215d949a1f65334cf9d7612dde38f20a95f2315deef167def91a6f", size = 127923 }, + { url = "https://files.pythonhosted.org/packages/34/70/2e0855361f76198a3965273048c8e50a9695d88cd75811a5b46444895845/orjson-3.11.7-cp312-cp312-win_amd64.whl", hash = "sha256:bf742e149121dc5648ba0a08ea0871e87b660467ef168a3a5e53bc1fbd64bb74", size = 125007 }, + { url = "https://files.pythonhosted.org/packages/68/40/c2051bd19fc467610fed469dc29e43ac65891571138f476834ca192bc290/orjson-3.11.7-cp312-cp312-win_arm64.whl", hash = "sha256:26c3b9132f783b7d7903bf1efb095fed8d4a3a85ec0d334ee8beff3d7a4749d5", size = 126089 }, + { url = "https://files.pythonhosted.org/packages/89/25/6e0e52cac5aab51d7b6dcd257e855e1dec1c2060f6b28566c509b4665f62/orjson-3.11.7-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:1d98b30cc1313d52d4af17d9c3d307b08389752ec5f2e5febdfada70b0f8c733", size = 228390 }, + { url = 
"https://files.pythonhosted.org/packages/a5/29/a77f48d2fc8a05bbc529e5ff481fb43d914f9e383ea2469d4f3d51df3d00/orjson-3.11.7-cp313-cp313-macosx_15_0_arm64.whl", hash = "sha256:d897e81f8d0cbd2abb82226d1860ad2e1ab3ff16d7b08c96ca00df9d45409ef4", size = 125189 }, + { url = "https://files.pythonhosted.org/packages/89/25/0a16e0729a0e6a1504f9d1a13cdd365f030068aab64cec6958396b9969d7/orjson-3.11.7-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:814be4b49b228cfc0b3c565acf642dd7d13538f966e3ccde61f4f55be3e20785", size = 128106 }, + { url = "https://files.pythonhosted.org/packages/66/da/a2e505469d60666a05ab373f1a6322eb671cb2ba3a0ccfc7d4bc97196787/orjson-3.11.7-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d06e5c5fed5caedd2e540d62e5b1c25e8c82431b9e577c33537e5fa4aa909539", size = 123363 }, + { url = "https://files.pythonhosted.org/packages/23/bf/ed73f88396ea35c71b38961734ea4a4746f7ca0768bf28fd551d37e48dd0/orjson-3.11.7-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:31c80ce534ac4ea3739c5ee751270646cbc46e45aea7576a38ffec040b4029a1", size = 129007 }, + { url = "https://files.pythonhosted.org/packages/73/3c/b05d80716f0225fc9008fbf8ab22841dcc268a626aa550561743714ce3bf/orjson-3.11.7-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f50979824bde13d32b4320eedd513431c921102796d86be3eee0b58e58a3ecd1", size = 141667 }, + { url = "https://files.pythonhosted.org/packages/61/e8/0be9b0addd9bf86abfc938e97441dcd0375d494594b1c8ad10fe57479617/orjson-3.11.7-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9e54f3808e2b6b945078c41aa8d9b5834b28c50843846e97807e5adb75fa9705", size = 130832 }, + { url = "https://files.pythonhosted.org/packages/c9/ec/c68e3b9021a31d9ec15a94931db1410136af862955854ed5dd7e7e4f5bff/orjson-3.11.7-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a12b80df61aab7b98b490fe9e4879925ba666fccdfcd175252ce4d9035865ace", size = 
133373 }, + { url = "https://files.pythonhosted.org/packages/d2/45/f3466739aaafa570cc8e77c6dbb853c48bf56e3b43738020e2661e08b0ac/orjson-3.11.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:996b65230271f1a97026fd0e6a753f51fbc0c335d2ad0c6201f711b0da32693b", size = 138307 }, + { url = "https://files.pythonhosted.org/packages/e1/84/9f7f02288da1ffb31405c1be07657afd1eecbcb4b64ee2817b6fe0f785fa/orjson-3.11.7-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:ab49d4b2a6a1d415ddb9f37a21e02e0d5dbfe10b7870b21bf779fc21e9156157", size = 408695 }, + { url = "https://files.pythonhosted.org/packages/18/07/9dd2f0c0104f1a0295ffbe912bc8d63307a539b900dd9e2c48ef7810d971/orjson-3.11.7-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:390a1dce0c055ddf8adb6aa94a73b45a4a7d7177b5c584b8d1c1947f2ba60fb3", size = 144099 }, + { url = "https://files.pythonhosted.org/packages/a5/66/857a8e4a3292e1f7b1b202883bcdeb43a91566cf59a93f97c53b44bd6801/orjson-3.11.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:1eb80451a9c351a71dfaf5b7ccc13ad065405217726b59fdbeadbcc544f9d223", size = 134806 }, + { url = "https://files.pythonhosted.org/packages/0a/5b/6ebcf3defc1aab3a338ca777214966851e92efb1f30dc7fc8285216e6d1b/orjson-3.11.7-cp313-cp313-win32.whl", hash = "sha256:7477aa6a6ec6139c5cb1cc7b214643592169a5494d200397c7fc95d740d5fcf3", size = 127914 }, + { url = "https://files.pythonhosted.org/packages/00/04/c6f72daca5092e3117840a1b1e88dfc809cc1470cf0734890d0366b684a1/orjson-3.11.7-cp313-cp313-win_amd64.whl", hash = "sha256:b9f95dcdea9d4f805daa9ddf02617a89e484c6985fa03055459f90e87d7a0757", size = 124986 }, + { url = "https://files.pythonhosted.org/packages/03/ba/077a0f6f1085d6b806937246860fafbd5b17f3919c70ee3f3d8d9c713f38/orjson-3.11.7-cp313-cp313-win_arm64.whl", hash = "sha256:800988273a014a0541483dc81021247d7eacb0c845a9d1a34a422bc718f41539", size = 126045 }, + { url = 
"https://files.pythonhosted.org/packages/e9/1e/745565dca749813db9a093c5ebc4bac1a9475c64d54b95654336ac3ed961/orjson-3.11.7-cp314-cp314-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:de0a37f21d0d364954ad5de1970491d7fbd0fb1ef7417d4d56a36dc01ba0c0a0", size = 228391 }, + { url = "https://files.pythonhosted.org/packages/46/19/e40f6225da4d3aa0c8dc6e5219c5e87c2063a560fe0d72a88deb59776794/orjson-3.11.7-cp314-cp314-macosx_15_0_arm64.whl", hash = "sha256:c2428d358d85e8da9d37cba18b8c4047c55222007a84f97156a5b22028dfbfc0", size = 125188 }, + { url = "https://files.pythonhosted.org/packages/9d/7e/c4de2babef2c0817fd1f048fd176aa48c37bec8aef53d2fa932983032cce/orjson-3.11.7-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c4bc6c6ac52cdaa267552544c73e486fecbd710b7ac09bc024d5a78555a22f6", size = 128097 }, + { url = "https://files.pythonhosted.org/packages/eb/74/233d360632bafd2197f217eee7fb9c9d0229eac0c18128aee5b35b0014fe/orjson-3.11.7-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bd0d68edd7dfca1b2eca9361a44ac9f24b078de3481003159929a0573f21a6bf", size = 123364 }, + { url = "https://files.pythonhosted.org/packages/79/51/af79504981dd31efe20a9e360eb49c15f06df2b40e7f25a0a52d9ae888e8/orjson-3.11.7-cp314-cp314-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:623ad1b9548ef63886319c16fa317848e465a21513b31a6ad7b57443c3e0dcf5", size = 129076 }, + { url = "https://files.pythonhosted.org/packages/67/e2/da898eb68b72304f8de05ca6715870d09d603ee98d30a27e8a9629abc64b/orjson-3.11.7-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6e776b998ac37c0396093d10290e60283f59cfe0fc3fccbd0ccc4bd04dd19892", size = 141705 }, + { url = "https://files.pythonhosted.org/packages/c5/89/15364d92acb3d903b029e28d834edb8780c2b97404cbf7929aa6b9abdb24/orjson-3.11.7-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:652c6c3af76716f4a9c290371ba2e390ede06f6603edb277b481daf37f6f464e", size = 130855 }, + { url = "https://files.pythonhosted.org/packages/c2/8b/ecdad52d0b38d4b8f514be603e69ccd5eacf4e7241f972e37e79792212ec/orjson-3.11.7-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a56df3239294ea5964adf074c54bcc4f0ccd21636049a2cf3ca9cf03b5d03cf1", size = 133386 }, + { url = "https://files.pythonhosted.org/packages/b9/0e/45e1dcf10e17d0924b7c9162f87ec7b4ca79e28a0548acf6a71788d3e108/orjson-3.11.7-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:bda117c4148e81f746655d5a3239ae9bd00cb7bc3ca178b5fc5a5997e9744183", size = 138295 }, + { url = "https://files.pythonhosted.org/packages/63/d7/4d2e8b03561257af0450f2845b91fbd111d7e526ccdf737267108075e0ba/orjson-3.11.7-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:23d6c20517a97a9daf1d48b580fcdc6f0516c6f4b5038823426033690b4d2650", size = 408720 }, + { url = "https://files.pythonhosted.org/packages/78/cf/d45343518282108b29c12a65892445fc51f9319dc3c552ceb51bb5905ed2/orjson-3.11.7-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:8ff206156006da5b847c9304b6308a01e8cdbc8cce824e2779a5ba71c3def141", size = 144152 }, + { url = "https://files.pythonhosted.org/packages/a9/3a/d6001f51a7275aacd342e77b735c71fa04125a3f93c36fee4526bc8c654e/orjson-3.11.7-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:962d046ee1765f74a1da723f4b33e3b228fe3a48bd307acce5021dfefe0e29b2", size = 134814 }, + { url = "https://files.pythonhosted.org/packages/1d/d3/f19b47ce16820cc2c480f7f1723e17f6d411b3a295c60c8ad3aa9ff1c96a/orjson-3.11.7-cp314-cp314-win32.whl", hash = "sha256:89e13dd3f89f1c38a9c9eba5fbf7cdc2d1feca82f5f290864b4b7a6aac704576", size = 127997 }, + { url = "https://files.pythonhosted.org/packages/12/df/172771902943af54bf661a8d102bdf2e7f932127968080632bda6054b62c/orjson-3.11.7-cp314-cp314-win_amd64.whl", hash = "sha256:845c3e0d8ded9c9271cd79596b9b552448b885b97110f628fb687aee2eed11c1", size = 124985 }, + { url = 
"https://files.pythonhosted.org/packages/6f/1c/f2a8d8a1b17514660a614ce5f7aac74b934e69f5abc2700cc7ced882a009/orjson-3.11.7-cp314-cp314-win_arm64.whl", hash = "sha256:4a2e9c5be347b937a2e0203866f12bba36082e89b402ddb9e927d5822e43088d", size = 126038 }, ] [[package]] @@ -1631,14 +1631,14 @@ wheels = [ [[package]] name = "redis" -version = "7.1.0" +version = "7.3.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "async-timeout", marker = "python_full_version < '3.11.3'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/43/c8/983d5c6579a411d8a99bc5823cc5712768859b5ce2c8afe1a65b37832c81/redis-7.1.0.tar.gz", hash = "sha256:b1cc3cfa5a2cb9c2ab3ba700864fb0ad75617b41f01352ce5779dabf6d5f9c3c", size = 4796669 } +sdist = { url = "https://files.pythonhosted.org/packages/da/82/4d1a5279f6c1251d3d2a603a798a1137c657de9b12cfc1fba4858232c4d2/redis-7.3.0.tar.gz", hash = "sha256:4d1b768aafcf41b01022410b3cc4f15a07d9b3d6fe0c66fc967da2c88e551034", size = 4928081 } wheels = [ - { url = "https://files.pythonhosted.org/packages/89/f0/8956f8a86b20d7bb9d6ac0187cf4cd54d8065bc9a1a09eb8011d4d326596/redis-7.1.0-py3-none-any.whl", hash = "sha256:23c52b208f92b56103e17c5d06bdc1a6c2c0b3106583985a76a18f83b265de2b", size = 354159 }, + { url = "https://files.pythonhosted.org/packages/f0/28/84e57fce7819e81ec5aa1bd31c42b89607241f4fb1a3ea5b0d2dbeaea26c/redis-7.3.0-py3-none-any.whl", hash = "sha256:9d4fcb002a12a5e3c3fbe005d59c48a2cc231f87fbb2f6b70c2d89bb64fec364", size = 404379 }, ] [[package]] @@ -1697,28 +1697,27 @@ wheels = [ [[package]] name = "ruff" -version = "0.14.9" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f6/1b/ab712a9d5044435be8e9a2beb17cbfa4c241aa9b5e4413febac2a8b79ef2/ruff-0.14.9.tar.gz", hash = "sha256:35f85b25dd586381c0cc053f48826109384c81c00ad7ef1bd977bfcc28119d5b", size = 5809165 } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/b8/1c/d1b1bba22cffec02351c78ab9ed4f7d7391876e12720298448b29b7229c1/ruff-0.14.9-py3-none-linux_armv6l.whl", hash = "sha256:f1ec5de1ce150ca6e43691f4a9ef5c04574ad9ca35c8b3b0e18877314aba7e75", size = 13576541 }, - { url = "https://files.pythonhosted.org/packages/94/ab/ffe580e6ea1fca67f6337b0af59fc7e683344a43642d2d55d251ff83ceae/ruff-0.14.9-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:ed9d7417a299fc6030b4f26333bf1117ed82a61ea91238558c0268c14e00d0c2", size = 13779363 }, - { url = "https://files.pythonhosted.org/packages/7d/f8/2be49047f929d6965401855461e697ab185e1a6a683d914c5c19c7962d9e/ruff-0.14.9-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d5dc3473c3f0e4a1008d0ef1d75cee24a48e254c8bed3a7afdd2b4392657ed2c", size = 12925292 }, - { url = "https://files.pythonhosted.org/packages/9e/e9/08840ff5127916bb989c86f18924fd568938b06f58b60e206176f327c0fe/ruff-0.14.9-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:84bf7c698fc8f3cb8278830fb6b5a47f9bcc1ed8cb4f689b9dd02698fa840697", size = 13362894 }, - { url = "https://files.pythonhosted.org/packages/31/1c/5b4e8e7750613ef43390bb58658eaf1d862c0cc3352d139cd718a2cea164/ruff-0.14.9-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:aa733093d1f9d88a5d98988d8834ef5d6f9828d03743bf5e338bf980a19fce27", size = 13311482 }, - { url = "https://files.pythonhosted.org/packages/5b/3a/459dce7a8cb35ba1ea3e9c88f19077667a7977234f3b5ab197fad240b404/ruff-0.14.9-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a1cfb04eda979b20c8c19550c8b5f498df64ff8da151283311ce3199e8b3648", size = 14016100 }, - { url = "https://files.pythonhosted.org/packages/a6/31/f064f4ec32524f9956a0890fc6a944e5cf06c63c554e39957d208c0ffc45/ruff-0.14.9-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:1e5cb521e5ccf0008bd74d5595a4580313844a42b9103b7388eca5a12c970743", size = 15477729 }, - { url = 
"https://files.pythonhosted.org/packages/7a/6d/f364252aad36ccd443494bc5f02e41bf677f964b58902a17c0b16c53d890/ruff-0.14.9-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd429a8926be6bba4befa8cdcf3f4dd2591c413ea5066b1e99155ed245ae42bb", size = 15122386 }, - { url = "https://files.pythonhosted.org/packages/20/02/e848787912d16209aba2799a4d5a1775660b6a3d0ab3944a4ccc13e64a02/ruff-0.14.9-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ab208c1b7a492e37caeaf290b1378148f75e13c2225af5d44628b95fd7834273", size = 14497124 }, - { url = "https://files.pythonhosted.org/packages/f3/51/0489a6a5595b7760b5dbac0dd82852b510326e7d88d51dbffcd2e07e3ff3/ruff-0.14.9-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72034534e5b11e8a593f517b2f2f2b273eb68a30978c6a2d40473ad0aaa4cb4a", size = 14195343 }, - { url = "https://files.pythonhosted.org/packages/f6/53/3bb8d2fa73e4c2f80acc65213ee0830fa0c49c6479313f7a68a00f39e208/ruff-0.14.9-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:712ff04f44663f1b90a1195f51525836e3413c8a773574a7b7775554269c30ed", size = 14346425 }, - { url = "https://files.pythonhosted.org/packages/ad/04/bdb1d0ab876372da3e983896481760867fc84f969c5c09d428e8f01b557f/ruff-0.14.9-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:a111fee1db6f1d5d5810245295527cda1d367c5aa8f42e0fca9a78ede9b4498b", size = 13258768 }, - { url = "https://files.pythonhosted.org/packages/40/d9/8bf8e1e41a311afd2abc8ad12be1b6c6c8b925506d9069b67bb5e9a04af3/ruff-0.14.9-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:8769efc71558fecc25eb295ddec7d1030d41a51e9dcf127cbd63ec517f22d567", size = 13326939 }, - { url = "https://files.pythonhosted.org/packages/f4/56/a213fa9edb6dd849f1cfbc236206ead10913693c72a67fb7ddc1833bf95d/ruff-0.14.9-py3-none-musllinux_1_2_i686.whl", hash = "sha256:347e3bf16197e8a2de17940cd75fd6491e25c0aa7edf7d61aa03f146a1aa885a", size = 13578888 }, - { url = 
"https://files.pythonhosted.org/packages/33/09/6a4a67ffa4abae6bf44c972a4521337ffce9cbc7808faadede754ef7a79c/ruff-0.14.9-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:7715d14e5bccf5b660f54516558aa94781d3eb0838f8e706fb60e3ff6eff03a8", size = 14314473 }, - { url = "https://files.pythonhosted.org/packages/12/0d/15cc82da5d83f27a3c6b04f3a232d61bc8c50d38a6cd8da79228e5f8b8d6/ruff-0.14.9-py3-none-win32.whl", hash = "sha256:df0937f30aaabe83da172adaf8937003ff28172f59ca9f17883b4213783df197", size = 13202651 }, - { url = "https://files.pythonhosted.org/packages/32/f7/c78b060388eefe0304d9d42e68fab8cffd049128ec466456cef9b8d4f06f/ruff-0.14.9-py3-none-win_amd64.whl", hash = "sha256:c0b53a10e61df15a42ed711ec0bda0c582039cf6c754c49c020084c55b5b0bc2", size = 14702079 }, - { url = "https://files.pythonhosted.org/packages/26/09/7a9520315decd2334afa65ed258fed438f070e31f05a2e43dd480a5e5911/ruff-0.14.9-py3-none-win_arm64.whl", hash = "sha256:8e821c366517a074046d92f0e9213ed1c13dbc5b37a7fc20b07f79b64d62cc84", size = 13744730 }, +version = "0.15.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/77/9b/840e0039e65fcf12758adf684d2289024d6140cde9268cc59887dc55189c/ruff-0.15.5.tar.gz", hash = "sha256:7c3601d3b6d76dce18c5c824fc8d06f4eef33d6df0c21ec7799510cde0f159a2", size = 4574214 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/47/20/5369c3ce21588c708bcbe517a8fbe1a8dfdb5dfd5137e14790b1da71612c/ruff-0.15.5-py3-none-linux_armv6l.whl", hash = "sha256:4ae44c42281f42e3b06b988e442d344a5b9b72450ff3c892e30d11b29a96a57c", size = 10478185 }, + { url = "https://files.pythonhosted.org/packages/44/ed/e81dd668547da281e5dce710cf0bc60193f8d3d43833e8241d006720e42b/ruff-0.15.5-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:6edd3792d408ebcf61adabc01822da687579a1a023f297618ac27a5b51ef0080", size = 10859201 }, + { url = 
"https://files.pythonhosted.org/packages/c4/8f/533075f00aaf19b07c5cd6aa6e5d89424b06b3b3f4583bfa9c640a079059/ruff-0.15.5-py3-none-macosx_11_0_arm64.whl", hash = "sha256:89f463f7c8205a9f8dea9d658d59eff49db05f88f89cc3047fb1a02d9f344010", size = 10184752 }, + { url = "https://files.pythonhosted.org/packages/66/0e/ba49e2c3fa0395b3152bad634c7432f7edfc509c133b8f4529053ff024fb/ruff-0.15.5-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba786a8295c6574c1116704cf0b9e6563de3432ac888d8f83685654fe528fd65", size = 10534857 }, + { url = "https://files.pythonhosted.org/packages/59/71/39234440f27a226475a0659561adb0d784b4d247dfe7f43ffc12dd02e288/ruff-0.15.5-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fd4b801e57955fe9f02b31d20375ab3a5c4415f2e5105b79fb94cf2642c91440", size = 10309120 }, + { url = "https://files.pythonhosted.org/packages/f5/87/4140aa86a93df032156982b726f4952aaec4a883bb98cb6ef73c347da253/ruff-0.15.5-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:391f7c73388f3d8c11b794dbbc2959a5b5afe66642c142a6effa90b45f6f5204", size = 11047428 }, + { url = "https://files.pythonhosted.org/packages/5a/f7/4953e7e3287676f78fbe85e3a0ca414c5ca81237b7575bdadc00229ac240/ruff-0.15.5-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8dc18f30302e379fe1e998548b0f5e9f4dff907f52f73ad6da419ea9c19d66c8", size = 11914251 }, + { url = "https://files.pythonhosted.org/packages/77/46/0f7c865c10cf896ccf5a939c3e84e1cfaeed608ff5249584799a74d33835/ruff-0.15.5-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1cc6e7f90087e2d27f98dc34ed1b3ab7c8f0d273cc5431415454e22c0bd2a681", size = 11333801 }, + { url = "https://files.pythonhosted.org/packages/d3/01/a10fe54b653061585e655f5286c2662ebddb68831ed3eaebfb0eb08c0a16/ruff-0.15.5-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c1cb7169f53c1ddb06e71a9aebd7e98fc0fea936b39afb36d8e86d36ecc2636a", size = 11206821 }, + { url = 
"https://files.pythonhosted.org/packages/7a/0d/2132ceaf20c5e8699aa83da2706ecb5c5dcdf78b453f77edca7fb70f8a93/ruff-0.15.5-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:9b037924500a31ee17389b5c8c4d88874cc6ea8e42f12e9c61a3d754ff72f1ca", size = 11133326 }, + { url = "https://files.pythonhosted.org/packages/72/cb/2e5259a7eb2a0f87c08c0fe5bf5825a1e4b90883a52685524596bfc93072/ruff-0.15.5-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:65bb414e5b4eadd95a8c1e4804f6772bbe8995889f203a01f77ddf2d790929dd", size = 10510820 }, + { url = "https://files.pythonhosted.org/packages/ff/20/b67ce78f9e6c59ffbdb5b4503d0090e749b5f2d31b599b554698a80d861c/ruff-0.15.5-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:d20aa469ae3b57033519c559e9bc9cd9e782842e39be05b50e852c7c981fa01d", size = 10302395 }, + { url = "https://files.pythonhosted.org/packages/5f/e5/719f1acccd31b720d477751558ed74e9c88134adcc377e5e886af89d3072/ruff-0.15.5-py3-none-musllinux_1_2_i686.whl", hash = "sha256:15388dd28c9161cdb8eda68993533acc870aa4e646a0a277aa166de9ad5a8752", size = 10754069 }, + { url = "https://files.pythonhosted.org/packages/c3/9c/d1db14469e32d98f3ca27079dbd30b7b44dbb5317d06ab36718dee3baf03/ruff-0.15.5-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:b30da330cbd03bed0c21420b6b953158f60c74c54c5f4c1dabbdf3a57bf355d2", size = 11304315 }, + { url = "https://files.pythonhosted.org/packages/28/3a/950367aee7c69027f4f422059227b290ed780366b6aecee5de5039d50fa8/ruff-0.15.5-py3-none-win32.whl", hash = "sha256:732e5ee1f98ba5b3679029989a06ca39a950cced52143a0ea82a2102cb592b74", size = 10551676 }, + { url = "https://files.pythonhosted.org/packages/b8/00/bf077a505b4e649bdd3c47ff8ec967735ce2544c8e4a43aba42ee9bf935d/ruff-0.15.5-py3-none-win_amd64.whl", hash = "sha256:821d41c5fa9e19117616c35eaa3f4b75046ec76c65e7ae20a333e9a8696bc7fe", size = 11678972 }, + { url = "https://files.pythonhosted.org/packages/fe/4e/cd76eca6db6115604b7626668e891c9dd03330384082e33662fb0f113614/ruff-0.15.5-py3-none-win_arm64.whl", hash = 
"sha256:b498d1c60d2fe5c10c45ec3f698901065772730b411f164ae270bb6bfcc4740b", size = 10965572 }, ] [[package]] @@ -1735,7 +1734,7 @@ wheels = [ [[package]] name = "scalene" -version = "1.5.55" +version = "2.1.4" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cloudpickle" }, @@ -1745,25 +1744,25 @@ dependencies = [ { name = "nvidia-ml-py", marker = "sys_platform != 'darwin'" }, { name = "psutil" }, { name = "pydantic" }, + { name = "pyyaml" }, { name = "rich" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/75/65/e57f87fd92aa8c14f5404dc04542054afbc41c1ba8e9e86f4414a58983e9/scalene-1.5.55.tar.gz", hash = "sha256:71c0c89287f46f9f1fa965def5866156313a949ed592b8acb008f8cafcf7c518", size = 9331156 } +sdist = { url = "https://files.pythonhosted.org/packages/7d/44/bd111beb3ccbeab825b3406b224f93de3c45b37b7618ea0dc4d6c75e3259/scalene-2.1.4.tar.gz", hash = "sha256:b712450203332f52e982db650e4f811956577fd29b7cca384a21dd4908e541c2", size = 9466993 } wheels = [ - { url = "https://files.pythonhosted.org/packages/cf/da/e270ae7e92f7ee2311af1125047f77a5f81ad45568cceae3e20f0b03750a/scalene-1.5.55-cp310-cp310-macosx_13_0_universal2.whl", hash = "sha256:441db462f1f69e11eb0fad6c9d061df4882287c99c4a2430b117dcbd84f4b7d8", size = 1135686 }, - { url = "https://files.pythonhosted.org/packages/ab/22/1abaf12312fb1f0df1e0533b968bfccb10f451a83ff3f0c041f8c9ef85d1/scalene-1.5.55-cp310-cp310-macosx_15_0_universal2.whl", hash = "sha256:2c6906abab7481935449a3f8fd460db3b8a9d9971ba14a5db44b95150931e1de", size = 1134464 }, - { url = "https://files.pythonhosted.org/packages/c7/80/59811e59e1ce2937b92c61fcb60aa31e76a60e70f41854f93cf33fc921ff/scalene-1.5.55-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7de65acfb441df94cb32d60e960c6335a6fe1617827a3da4cbb07816d12f372a", size = 1410961 }, - { url = 
"https://files.pythonhosted.org/packages/b1/d0/e05fadab789effd32c32b47a822106b65455db70736e968c612820770e62/scalene-1.5.55-cp310-cp310-win_amd64.whl", hash = "sha256:6707b7ddb87ca22fb55b1605960f9bae2a7f68f9acfb161f6b0816079b5496cf", size = 1025322 }, - { url = "https://files.pythonhosted.org/packages/10/fd/a88a2355a6290dffd32f7771ceb425aa42fa2b181f28a00df5a25797ce6a/scalene-1.5.55-cp311-cp311-macosx_13_0_universal2.whl", hash = "sha256:58c0ed50c159df32887fe91abde01e1e152d270814ffa892edbd79f132d3ffb3", size = 1136476 }, - { url = "https://files.pythonhosted.org/packages/cf/65/032be96f3d5dd548dfe60aed6403d367a35a76d712b6fd919527e51363e9/scalene-1.5.55-cp311-cp311-macosx_15_0_universal2.whl", hash = "sha256:be4f5c58c8e31a8f7262c7c2bf39855b8573570ccb99e235055141a00f72fec6", size = 1134545 }, - { url = "https://files.pythonhosted.org/packages/f5/94/dfc47c4a3ffeaea541885296699961832cbf79328e432a565148218917cb/scalene-1.5.55-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9651c24636edf72e07d57ee24cc636b19ce4dbf3c0ca9d1d527fc69d20e1ae5c", size = 1411207 }, - { url = "https://files.pythonhosted.org/packages/d9/9a/616850c58f987aa889685431cfdce5bcfd0e7444fa20acdc750809a015c5/scalene-1.5.55-cp312-cp312-macosx_13_0_universal2.whl", hash = "sha256:1750f70215762dec3b7998fecbaf9b59704d18711f24258adf15d54cecf42e33", size = 1136310 }, - { url = "https://files.pythonhosted.org/packages/f2/e4/ba77dd1a9b3415287d9ee73909a4527799eaf44908fc8abe9ddb94ac8887/scalene-1.5.55-cp312-cp312-macosx_15_0_universal2.whl", hash = "sha256:8bb094a887062bb12e91a81cc8b69c73a5f35a42da3cc6a67e92f8a3eea72af7", size = 1134356 }, - { url = "https://files.pythonhosted.org/packages/37/b5/8b67429f201b74c576794a4fbfc7fa401d2f4570ba3aa98d9922e3a6f5a8/scalene-1.5.55-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c31ddee2590bcb55c53d9562b6aa8989c8c975b49777a0b4beb5d68ac7804d8c", size = 1411709 }, - { url = 
"https://files.pythonhosted.org/packages/ab/21/fe23516085fb57686bf3ce5573f83b28e12a7c994b96ea28b214b022aff9/scalene-1.5.55-cp312-cp312-win_amd64.whl", hash = "sha256:c86c06a88a1714f5a77c609e69153702a30e679a015045ba53e504aea19ede4b", size = 1025322 }, - { url = "https://files.pythonhosted.org/packages/a4/b9/9c0279f95e254eff8880d65687007f8ff3ec955fb0e0a3c3d93694a1ef7b/scalene-1.5.55-cp313-cp313-macosx_13_0_universal2.whl", hash = "sha256:34fd559e5043d91b17e450bd3373dec0279881959314be4db47bedaa9da065a9", size = 1136330 }, - { url = "https://files.pythonhosted.org/packages/31/84/a21828d85f94bbb053268c4513bef0f7e5c168ecb1e21315bc66f598c87f/scalene-1.5.55-cp313-cp313-macosx_15_0_universal2.whl", hash = "sha256:c625fd30a3b73b98ae1acd2cb2268b0f654987021192277c62c0e6e0883cd0ae", size = 1134342 }, - { url = "https://files.pythonhosted.org/packages/bb/b4/33636da3cd6ed2a2bea19907c4c64a630931eb0fb6697a27735234ab4282/scalene-1.5.55-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d4f42a8daaf7a17beca104d44dafc704617f35af385305baa27ed489bb2f2dc1", size = 1411645 }, - { url = "https://files.pythonhosted.org/packages/d0/5d/c620fd816a05b979cb5b61c8c18128e2136214a0e50b755231dfd4f4f0b4/scalene-1.5.55-cp313-cp313-win_amd64.whl", hash = "sha256:57daf3072f88e7fdda3bc94d0e75f30733268f033fed76f1b909c59315926634", size = 1025323 }, + { url = "https://files.pythonhosted.org/packages/1f/3d/2786c6b1f9ab482656fa67347b72db82a7a97705ac5635b19a74893a9470/scalene-2.1.4-cp310-cp310-macosx_15_0_universal2.whl", hash = "sha256:82c183368331257e8780484eb5cdfe75943068372b0f14a8c41201cd10f75a10", size = 1216441 }, + { url = "https://files.pythonhosted.org/packages/c9/3b/90b87ce2396cec0934f7480a780a7a4531e38319f2fafbc4ab45feb5a5fa/scalene-2.1.4-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5f23b41cc1628cf8139f47759d947830e2641443cfe37df67b48fb323344be36", size = 1508767 }, + { url = 
"https://files.pythonhosted.org/packages/11/7b/6b5302765a602f1a02b78830b178cc0a58682afbe020ae60c113375bce88/scalene-2.1.4-cp310-cp310-win_amd64.whl", hash = "sha256:fe8b88ae7809a188e1702b821006f7798d6e496215ef3c0c8ed3e4253c3bcfaa", size = 1150547 }, + { url = "https://files.pythonhosted.org/packages/1c/78/4ec8fd95151db369ce148bd3941dc829fc38c3af6173fbe1c8fa7fed54bb/scalene-2.1.4-cp311-cp311-macosx_15_0_universal2.whl", hash = "sha256:5af958bf19511c72ddf812d2a38edf8fe3937fc7ccb63d434083326f966bcc42", size = 1216500 }, + { url = "https://files.pythonhosted.org/packages/e5/44/99575a3caaf7bee9abe41710f7215b3dad379f52de6aaa7e458ac1399782/scalene-2.1.4-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fa843b58fa4ef68032de4ea9eb5e788443a8d765c5fe50582febb2afc7b92d94", size = 1509486 }, + { url = "https://files.pythonhosted.org/packages/2a/52/f27d44a8f673eb1340cefcfb6544f755b7d8ff1a3c8794eccec365b5ad76/scalene-2.1.4-cp312-cp312-macosx_15_0_universal2.whl", hash = "sha256:c3d0ad5e3d290afb6806d99134e1022abf3e29759b6d01c86bdb4116f10d2400", size = 1216279 }, + { url = "https://files.pythonhosted.org/packages/80/a7/03aad59b0f686429dbeb51af5e033448a46a32671ccf4dea07c592eb1f3d/scalene-2.1.4-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2bb2f0e105a4e53db106b55648c6ad3023095b65f676a37291990877ad24096c", size = 1509935 }, + { url = "https://files.pythonhosted.org/packages/f4/28/e80484e6dc8d5db273b6559ad73b575d400bef016f5d3080127ebe3d2b27/scalene-2.1.4-cp312-cp312-win_amd64.whl", hash = "sha256:a2893c8466ad15be1322aa9705ea1e227aea0b571a9e124908d74caabfe52618", size = 1150570 }, + { url = "https://files.pythonhosted.org/packages/51/ba/ac8f7724d68cd2aed6447c061dff392faeeb42cbf7bc1bc620457ea2e95e/scalene-2.1.4-cp313-cp313-macosx_15_0_universal2.whl", hash = "sha256:376b86c5e4bb7d3eabb9d601308c14d6527fbf9d21b2aa78b14ca0a91d18717d", size = 1220985 }, + { url = 
"https://files.pythonhosted.org/packages/96/17/dd94be533eabe71c2c19f3f2c56cbaa8db10be34a9cc5f55b5081075d30e/scalene-2.1.4-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e96097094f5b42b7db8185a513a6e10c8c85bc6ac893723303fe2c93c1393f4", size = 1519792 }, + { url = "https://files.pythonhosted.org/packages/f3/00/37b93ddf3d14fcca1f9060e3edb50a08c250bae9503939df7e25f8c4195e/scalene-2.1.4-cp313-cp313-win_amd64.whl", hash = "sha256:943b084efab02200868e1cd8cb1e933e7f39381dbf1afa7aa61f3063af49c4c6", size = 1152194 }, + { url = "https://files.pythonhosted.org/packages/35/d0/32e8b47875fe9b36e4c277bd0d94056692119663ed521af47d80eec3683f/scalene-2.1.4-cp314-cp314-macosx_15_0_universal2.whl", hash = "sha256:0f4d3e1b8fff5a21e77afc1045e600e7cce6079cec06b013b96af05e40479c88", size = 1221237 }, + { url = "https://files.pythonhosted.org/packages/b4/6d/23d3e966cfe67c2225346a59c117d05bd362dc3cb9ae0d64c83f7a8b8945/scalene-2.1.4-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:336b50c4bbcc0e1b6343dc991770f4c6d382d05296b1f65ec02a0c58c8a6a385", size = 1519605 }, + { url = "https://files.pythonhosted.org/packages/f6/8b/2867283097d8ba657b613c1448891f7f58151934ab779180d2cc962f9bd8/scalene-2.1.4-cp314-cp314-win_amd64.whl", hash = "sha256:03ba0d73996b7a0fd37ec581768d180487414d8a7c429e7ee6e876e8f3fe08ab", size = 1158151 }, ] [[package]] @@ -1777,7 +1776,7 @@ wheels = [ [[package]] name = "testcontainers" -version = "4.13.3" +version = "4.14.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "docker" }, @@ -1786,9 +1785,9 @@ dependencies = [ { name = "urllib3" }, { name = "wrapt" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/fc/b3/c272537f3ea2f312555efeb86398cc382cd07b740d5f3c730918c36e64e1/testcontainers-4.13.3.tar.gz", hash = "sha256:9d82a7052c9a53c58b69e1dc31da8e7a715e8b3ec1c4df5027561b47e2efe646", size = 79064 } +sdist = { url = 
"https://files.pythonhosted.org/packages/8b/02/ef62dec9e4f804189c44df23f0b86897c738d38e9c48282fcd410308632f/testcontainers-4.14.1.tar.gz", hash = "sha256:316f1bb178d829c003acd650233e3ff3c59a833a08d8661c074f58a4fbd42a64", size = 80148 } wheels = [ - { url = "https://files.pythonhosted.org/packages/73/27/c2f24b19dafa197c514abe70eda69bc031c5152c6b1f1e5b20099e2ceedd/testcontainers-4.13.3-py3-none-any.whl", hash = "sha256:063278c4805ffa6dd85e56648a9da3036939e6c0ac1001e851c9276b19b05970", size = 124784 }, + { url = "https://files.pythonhosted.org/packages/c8/31/5e7b23f9e43ff7fd46d243808d70c5e8daf3bc08ecf5a7fb84d5e38f7603/testcontainers-4.14.1-py3-none-any.whl", hash = "sha256:03dfef4797b31c82e7b762a454b6afec61a2a512ad54af47ab41e4fa5415f891", size = 125640 }, ] [package.optional-dependencies]