diff --git a/Dockerfile.patched b/Dockerfile.patched
index 826f926..c905646 100644
--- a/Dockerfile.patched
+++ b/Dockerfile.patched
@@ -3,7 +3,4 @@ FROM my-fastapi-app:latest
# Install only the missing packages
RUN pip install --no-cache-dir \
- openai
-
-# docker build -f Dockerfile.patched -t my-fastapi-app:patched .
-# docker tag my-fastapi-app:patched my-fastapi-app:latest
\ No newline at end of file
+ lime
\ No newline at end of file
diff --git a/backend/api/report/__init__.py b/backend/api/report/__init__.py
new file mode 100644
index 0000000..1e188f5
--- /dev/null
+++ b/backend/api/report/__init__.py
@@ -0,0 +1 @@
+# backend/api/report/__init__.py
diff --git a/backend/api/report/routes.py b/backend/api/report/routes.py
new file mode 100644
index 0000000..e83f623
--- /dev/null
+++ b/backend/api/report/routes.py
@@ -0,0 +1,204 @@
+# backend/api/report/routes.py
+"""
+Synaptic Shield — Report Generation API
+POST /report/generate — generate a PDF for image, video, or audio analysis
+GET /report/download/{filename} — download a previously generated PDF
+"""
+
+import os
+import uuid
+import logging
+from datetime import datetime
+
+from fastapi import APIRouter, HTTPException
+from fastapi.responses import FileResponse, JSONResponse
+
+from .schemas import GenerateReportRequest, GenerateReportResponse
+
+logger = logging.getLogger(__name__)
+
+router = APIRouter(prefix="/report", tags=["Report Generation"])
+
+REPORTS_DIR = "/app/backend/reports"
+os.makedirs(REPORTS_DIR, exist_ok=True)
+
+
+# ─── POST /report/generate ────────────────────────────────────────────────────
+
+@router.post("/generate", response_model=GenerateReportResponse)
+async def generate_report(payload: GenerateReportRequest):
+ """
+ Generate a professional forensic PDF report.
+
+ module_type:
+ - "image" → Module A: Single-image deepfake analysis report
+ - "video" → Module B: Video frame-by-frame analysis report
+ - "audio" → Module C: Acoustic deepfake analysis report
+
+ Returns the file path and report ID. Use GET /report/download/{filename}
+ to retrieve the actual PDF binary.
+ """
+ case_id = payload.case_id or f"CASE-{datetime.now().strftime('%Y%m%d%H%M%S')}"
+ module = payload.module_type
+
+ logger.info(f"[ReportAPI] Generating {module.upper()} report for case: {case_id}")
+
+ try:
+ if module == "image":
+ file_path, report_id = _generate_image_report(payload, case_id)
+ elif module == "video":
+ file_path, report_id = _generate_video_report(payload, case_id)
+ elif module == "audio":
+ file_path, report_id = _generate_audio_report(payload, case_id)
+ else:
+ raise HTTPException(status_code=400, detail=f"Unknown module_type: {module}")
+
+ logger.info(f"[ReportAPI] ✅ Generated: {file_path}")
+ return GenerateReportResponse(
+ report_id=report_id,
+ file_path=file_path,
+ module_type=module,
+ message=f"{module.upper()} forensic report generated successfully",
+ )
+
+ except HTTPException:
+ raise
+ except Exception as e:
+ logger.error(f"[ReportAPI] ❌ Report generation failed: {e}", exc_info=True)
+ raise HTTPException(
+ status_code=500,
+ detail=f"Report generation failed: {str(e)}"
+ )
+
+
+# ─── GET /report/download/{filename} ─────────────────────────────────────────
+
+@router.get("/download/{filename}")
+async def download_report(filename: str):
+ """
+ Download a previously generated forensic PDF report by filename.
+ Security: only alphanumeric, dash, underscore, dot allowed in filename.
+ """
+ import re
+ if not re.match(r'^[\w\-. ]+\.pdf$', filename, re.IGNORECASE):
+ raise HTTPException(status_code=400, detail="Invalid filename format")
+
+ file_path = os.path.join(REPORTS_DIR, filename)
+ if not os.path.exists(file_path):
+ raise HTTPException(status_code=404, detail=f"Report not found: {filename}")
+
+ return FileResponse(
+ path=file_path,
+ media_type="application/pdf",
+ filename=filename,
+ headers={
+ "Content-Disposition": f'attachment; filename="{filename}"',
+ "X-Report-Source": "Synaptic Shield XAI Platform",
+ }
+ )
+
+
+# ─── GET /report/list ─────────────────────────────────────────────────────────
+
+@router.get("/list")
+async def list_reports():
+ """List all generated reports in the reports directory."""
+ try:
+ files = [
+ f for f in os.listdir(REPORTS_DIR)
+ if f.lower().endswith(".pdf")
+ ]
+ files.sort(reverse=True) # newest first
+ return {
+ "reports": files,
+ "count": len(files),
+ "reports_dir": REPORTS_DIR,
+ }
+ except Exception as e:
+ raise HTTPException(status_code=500, detail=str(e))
+
+
+# ─── Internal helpers ─────────────────────────────────────────────────────────
+
+def _generate_image_report(payload: GenerateReportRequest, case_id: str):
+ from services.reports.image_report import ImageForensicReport
+
+ img = payload.image_data
+ if img is None:
+ img = {}
+ else:
+ img = img.model_dump()
+
+ # Resolve LLM explanation: prefer executive_summary, fall back to llm_explanation
+ llm_text = payload.executive_summary or payload.llm_explanation or ""
+
+ data = {
+ "case_id": case_id,
+ "executive_summary": llm_text,
+ "llm_explanation": llm_text,
+ # Flatten image_data fields into root
+ **img,
+ }
+
+ report = ImageForensicReport(data)
+ file_path = report.generate(REPORTS_DIR)
+ return file_path, report.report_id
+
+
+def _generate_video_report(payload: GenerateReportRequest, case_id: str):
+ from services.reports.video_report import VideoForensicReport
+
+ vid = payload.video_data
+ vid_dict = vid.model_dump() if vid else {}
+
+ # Build flagged_frames list
+ flagged = []
+ if payload.flagged_frames:
+ for f in payload.flagged_frames:
+ fd = f.model_dump()
+ flagged.append(fd)
+
+ # If anomaly_count not set, derive from flagged frames
+ if not vid_dict.get("anomaly_count") and flagged:
+ vid_dict["anomaly_count"] = len([f for f in flagged if f.get("is_anomaly", True)])
+
+ # Resolve LLM explanation: prefer executive_summary, fall back to llm_explanation
+ llm_text = payload.executive_summary or payload.llm_explanation or ""
+
+ data = {
+ "case_id": case_id,
+ "executive_summary": llm_text,
+ "llm_explanation": llm_text,
+ "video_data": vid_dict,
+ "flagged_frames": flagged,
+ }
+
+ report = VideoForensicReport(data)
+ file_path = report.generate(REPORTS_DIR)
+ return file_path, report.report_id
+
+
+def _generate_audio_report(payload: GenerateReportRequest, case_id: str):
+ from services.reports.audio_report import AudioForensicReport
+
+ aud = payload.audio_data
+ aud_dict = aud.model_dump() if aud else {}
+
+ stft_dict = payload.stft.model_dump() if payload.stft else None
+
+ # Resolve LLM explanation: prefer executive_summary, fall back to llm_explanation
+ llm_text = payload.executive_summary or payload.llm_explanation or ""
+
+ data = {
+ "case_id": case_id,
+ "executive_summary": llm_text,
+ "llm_explanation": llm_text,
+ "audio_data": aud_dict,
+ "ig_scores": payload.ig_scores or [],
+ "shap_scores": payload.shap_scores or [],
+ "stft": stft_dict,
+ }
+
+ report = AudioForensicReport(data)
+ file_path = report.generate(REPORTS_DIR)
+ return file_path, report.report_id
diff --git a/backend/api/report/schemas.py b/backend/api/report/schemas.py
new file mode 100644
index 0000000..073a488
--- /dev/null
+++ b/backend/api/report/schemas.py
@@ -0,0 +1,117 @@
+# backend/api/report/schemas.py
+"""
+Pydantic request/response schemas for the forensic PDF report generation API.
+Supports three module types: image, video, audio.
+"""
+
+from typing import Optional, Literal, List, Any
+from pydantic import BaseModel, Field
+
+
+# ─── Shared ───────────────────────────────────────────────────────────────────
+
+class StftDataSchema(BaseModel):
+ """STFT spectrogram data forwarded from the audio analysis result."""
+ matrix: List[List[float]]
+ times: List[float]
+ freqs: List[float]
+ db_min: float
+ db_max: float
+
+
+# ─── Module A: Image ──────────────────────────────────────────────────────────
+
+class ImageDataSchema(BaseModel):
+ task_id: Optional[str] = None
+ file_name: str = "Unknown"
+ verdict: Optional[str] = None
+ is_fake: bool = False
+ confidence: float = 0.0
+ fake_prob: float = 0.0
+ real_prob: float = 0.0
+ anomaly_type: Optional[str] = None
+ sha256_hash: Optional[str] = None
+ # Base64 images (with or without data URI prefix)
+ thumbnail_b64: Optional[str] = None
+ gradcam_b64: Optional[str] = None
+ ela_b64: Optional[str] = None
+ # JSON XAI payloads
+ fft_data: Optional[Any] = None
+ lime_data: Optional[Any] = None
+
+
+# ─── Module B: Video ──────────────────────────────────────────────────────────
+
+class FlaggedFrameSchema(BaseModel):
+ frame_index: int = 0
+ timestamp: str = "00:00:00"
+ is_anomaly: bool = True
+ confidence: float = 0.0
+ fake_prob: float = 0.0
+ real_prob: float = 0.0
+ anomaly_type: Optional[str] = None
+ # Base64 frame image data (raw base64, NO data URI prefix needed)
+ frame_data: Optional[str] = None
+ gradcam_b64: Optional[str] = None
+ ela_b64: Optional[str] = None
+ lime_b64: Optional[str] = None
+ fft_b64: Optional[str] = None
+
+
+class VideoDataSchema(BaseModel):
+ task_id: Optional[str] = None
+ file_name: str = "Unknown"
+ verdict: Optional[str] = None
+ is_fake: bool = False
+ confidence: float = 0.0
+ fake_prob: float = 0.0
+ real_prob: float = 0.0
+ total_frames: int = 0
+ anomaly_count: int = 0
+ duration_seconds: float = 0.0
+ detected_type: Optional[str] = None
+
+
+# ─── Module C: Audio ──────────────────────────────────────────────────────────
+
+class AudioDataSchema(BaseModel):
+ task_id: Optional[str] = None
+ file_name: str = "Unknown"
+ verdict: str = "REAL"
+ is_fake: bool = False
+ confidence: float = 0.0
+ fake_prob: float = 0.0
+ real_prob: float = 0.0
+ duration_seconds: float = 0.0
+
+
+# ─── Unified Report Request ───────────────────────────────────────────────────
+
+class GenerateReportRequest(BaseModel):
+ case_id: Optional[str] = None
+ module_type: Literal["image", "video", "audio"]
+ executive_summary: Optional[str] = None
+ # Alias: some frontend sends llm_explanation instead of executive_summary
+ llm_explanation: Optional[str] = None
+
+ # Module-specific payloads (only one should be filled per call)
+ image_data: Optional[ImageDataSchema] = None
+ video_data: Optional[VideoDataSchema] = None
+ audio_data: Optional[AudioDataSchema] = None
+
+ # Video: list of flagged frames with base64 images
+ flagged_frames: Optional[List[FlaggedFrameSchema]] = None
+
+ # Audio: XAI score vectors and spectrogram
+ ig_scores: Optional[List[float]] = None
+ shap_scores: Optional[List[float]] = None
+ stft: Optional[StftDataSchema] = None
+
+
+# ─── Response ─────────────────────────────────────────────────────────────────
+
+class GenerateReportResponse(BaseModel):
+ report_id: str
+ file_path: str
+ module_type: str
+ message: str = "Report generated successfully"
diff --git a/backend/main.py b/backend/main.py
index 5c369d2..1cf7247 100644
--- a/backend/main.py
+++ b/backend/main.py
@@ -158,6 +158,7 @@ async def shutdown_event():
from api.users.routes import router as users_router
from api.audio.routes import router as audio_router
from api.audio.websocket import router as audio_ws_router
+from api.report.routes import router as report_router
app.include_router(video_ws_router)
app.include_router(video_router)
@@ -165,6 +166,7 @@ async def shutdown_event():
app.include_router(users_router)
app.include_router(audio_router)
app.include_router(audio_ws_router)
+app.include_router(report_router)
@app.get("/health")
diff --git a/backend/reports/CASE-1775297748773_20260404_101548.pdf b/backend/reports/CASE-1775297748773_20260404_101548.pdf
new file mode 100644
index 0000000..fbc272c
Binary files /dev/null and b/backend/reports/CASE-1775297748773_20260404_101548.pdf differ
diff --git a/backend/reports/CASE-1775298471996_20260404_102758.pdf b/backend/reports/CASE-1775298471996_20260404_102758.pdf
new file mode 100644
index 0000000..8b7489c
Binary files /dev/null and b/backend/reports/CASE-1775298471996_20260404_102758.pdf differ
diff --git a/backend/reports/CASE-1775298904340_20260404_103504.pdf b/backend/reports/CASE-1775298904340_20260404_103504.pdf
new file mode 100644
index 0000000..91aaec9
Binary files /dev/null and b/backend/reports/CASE-1775298904340_20260404_103504.pdf differ
diff --git a/backend/reports/CASE-1775333345218_20260404_200905.pdf b/backend/reports/CASE-1775333345218_20260404_200905.pdf
new file mode 100644
index 0000000..a81023a
Binary files /dev/null and b/backend/reports/CASE-1775333345218_20260404_200905.pdf differ
diff --git a/backend/reports/CASE-1775333487413_20260404_201127.pdf b/backend/reports/CASE-1775333487413_20260404_201127.pdf
new file mode 100644
index 0000000..629d687
Binary files /dev/null and b/backend/reports/CASE-1775333487413_20260404_201127.pdf differ
diff --git a/backend/reports/CASE-1775333777664_20260404_201617.pdf b/backend/reports/CASE-1775333777664_20260404_201617.pdf
new file mode 100644
index 0000000..cb03dd3
Binary files /dev/null and b/backend/reports/CASE-1775333777664_20260404_201617.pdf differ
diff --git a/backend/reports/CASE-1775333811931_20260404_201651.pdf b/backend/reports/CASE-1775333811931_20260404_201651.pdf
new file mode 100644
index 0000000..c70ed49
Binary files /dev/null and b/backend/reports/CASE-1775333811931_20260404_201651.pdf differ
diff --git a/backend/reports/CASE-1775333837185_20260404_201717.pdf b/backend/reports/CASE-1775333837185_20260404_201717.pdf
new file mode 100644
index 0000000..6f2d565
Binary files /dev/null and b/backend/reports/CASE-1775333837185_20260404_201717.pdf differ
diff --git a/backend/reports/CASE-1775337685334_20260404_212126.pdf b/backend/reports/CASE-1775337685334_20260404_212126.pdf
new file mode 100644
index 0000000..ddc7d1f
Binary files /dev/null and b/backend/reports/CASE-1775337685334_20260404_212126.pdf differ
diff --git a/backend/reports/CASE-1775337846765_20260404_212407.pdf b/backend/reports/CASE-1775337846765_20260404_212407.pdf
new file mode 100644
index 0000000..783cd29
Binary files /dev/null and b/backend/reports/CASE-1775337846765_20260404_212407.pdf differ
diff --git a/backend/reports/CASE-1775339205748_20260404_214725.pdf b/backend/reports/CASE-1775339205748_20260404_214725.pdf
new file mode 100644
index 0000000..a83dd1f
Binary files /dev/null and b/backend/reports/CASE-1775339205748_20260404_214725.pdf differ
diff --git a/backend/reports/CASE-1775380907267_20260405_092151.pdf b/backend/reports/CASE-1775380907267_20260405_092151.pdf
new file mode 100644
index 0000000..c4a342c
Binary files /dev/null and b/backend/reports/CASE-1775380907267_20260405_092151.pdf differ
diff --git a/backend/reports/CASE-1775381032007_20260405_092352.pdf b/backend/reports/CASE-1775381032007_20260405_092352.pdf
new file mode 100644
index 0000000..f7dc23c
Binary files /dev/null and b/backend/reports/CASE-1775381032007_20260405_092352.pdf differ
diff --git a/backend/reports/CASE-1775381601172_20260405_093322.pdf b/backend/reports/CASE-1775381601172_20260405_093322.pdf
new file mode 100644
index 0000000..39b71c7
Binary files /dev/null and b/backend/reports/CASE-1775381601172_20260405_093322.pdf differ
diff --git a/backend/reports/string_20260404_093452.pdf b/backend/reports/string_20260404_093452.pdf
new file mode 100644
index 0000000..7d146b9
Binary files /dev/null and b/backend/reports/string_20260404_093452.pdf differ
diff --git a/backend/services/pdf_generator/__init__.py b/backend/services/pdf_generator/__init__.py
new file mode 100644
index 0000000..b75d1e2
--- /dev/null
+++ b/backend/services/pdf_generator/__init__.py
@@ -0,0 +1,34 @@
+# backend/services/pdf_generator/__init__.py
+"""
+PDF Generation Module for Deepfake Detection Reports.
+
+Supports generation of professional forensic PDF reports for:
+- Video Analysis
+- Image Analysis
+- Audio Analysis
+
+Each report includes:
+- Executive Summary (LLM-generated)
+- Technical Breakdown (anomaly scores, confidence levels)
+- XAI Visualizations (Grad-CAM, ELA, spectrograms)
+- Forensic Evidence (side-by-side comparisons)
+"""
+
+from .generator import PDFGenerator, generate_forensic_report
+from .schemas import (
+ AnalysisReportData,
+ VideoAnalysisData,
+ ImageAnalysisData,
+ AudioAnalysisData,
+ XAIResultData,
+)
+
+__all__ = [
+ "PDFGenerator",
+ "generate_forensic_report",
+ "AnalysisReportData",
+ "VideoAnalysisData",
+ "ImageAnalysisData",
+ "AudioAnalysisData",
+ "XAIResultData",
+]
\ No newline at end of file
diff --git a/backend/services/reports/__init__.py b/backend/services/reports/__init__.py
new file mode 100644
index 0000000..ff3c1cd
--- /dev/null
+++ b/backend/services/reports/__init__.py
@@ -0,0 +1,6 @@
+# backend/services/reports/__init__.py
+from .image_report import ImageForensicReport
+from .video_report import VideoForensicReport
+from .audio_report import AudioForensicReport
+
+__all__ = ["ImageForensicReport", "VideoForensicReport", "AudioForensicReport"]
diff --git a/backend/services/reports/audio_report.py b/backend/services/reports/audio_report.py
new file mode 100644
index 0000000..41a8831
--- /dev/null
+++ b/backend/services/reports/audio_report.py
@@ -0,0 +1,330 @@
+# backend/services/reports/audio_report.py
+import io
+import uuid
+import logging
+import numpy as np
+from datetime import datetime, timezone
+from typing import Optional, List
+
+from reportlab.lib.units import mm
+from reportlab.lib import colors
+from reportlab.platypus import (
+ Paragraph, Spacer, Table, TableStyle, HRFlowable, PageBreak, KeepTogether, Image as RLImage
+)
+
+from .base_report import (
+ BrandColors, build_styles, build_doc, make_rl_image, metadata_table,
+ SectionBand, VerdictBanner, MetricBox, pro_table, make_confidence_bar,
+ make_donut, make_artifact_bars, fig_to_img, W, H, PLT_NAVY, PLT_LGREY, PLT_RED
+)
+
+logger = logging.getLogger(__name__)
+
+# ─── EXTRA AUDIO MATPLOTLIB RENDERERS ─────────────────────────────────────────
+def _render_spectrogram(stft: dict, width_px: int = 900, height_px: int = 280) -> Optional[RLImage]:
+ try:
+ import matplotlib
+ matplotlib.use("Agg")
+ import matplotlib.pyplot as plt
+ from matplotlib.ticker import MaxNLocator
+
+ matrix = np.array(stft["matrix"])
+ times = np.array(stft.get("times", []))
+ freqs = np.array(stft.get("freqs", []))
+ db_min = stft.get("db_min", -80)
+ db_max = stft.get("db_max", 0)
+
+ dpi = 100
+ fig, ax = plt.subplots(figsize=(width_px / dpi, height_px / dpi), dpi=dpi)
+ fig.patch.set_facecolor('white')
+ ax.set_facecolor('white')
+
+ extent = [
+ times[0] if len(times) else 0,
+ times[-1] if len(times) else matrix.shape[1],
+ freqs[0] if len(freqs) else 0,
+ freqs[-1] if len(freqs) else matrix.shape[0],
+ ]
+
+ im = ax.imshow(
+ matrix, aspect="auto", origin="lower", cmap="inferno",
+ vmin=db_min, vmax=db_max, extent=extent, interpolation="nearest"
+ )
+
+ cbar = fig.colorbar(im, ax=ax, fraction=0.02, pad=0.01)
+ cbar.ax.tick_params(colors=PLT_NAVY, labelsize=7)
+ cbar.set_label("dB", color=PLT_NAVY, fontsize=7)
+
+ ax.set_xlabel("Time (s)", color=PLT_NAVY, fontsize=8)
+ ax.set_ylabel("Frequency (Hz)", color=PLT_NAVY, fontsize=8)
+ ax.tick_params(colors=PLT_NAVY, labelsize=7)
+
+ for spine in ax.spines.values():
+ spine.set_edgecolor(PLT_LGREY)
+
+ ax.yaxis.set_major_locator(MaxNLocator(6))
+ ax.xaxis.set_major_locator(MaxNLocator(8))
+
+ return fig_to_img(fig, width_px*0.75, height_px*0.75)
+ except Exception as e:
+ logger.warning(f"[AudioReport] Spectrogram render failed: {e}")
+ return None
+
+def _render_xai_bar_chart(scores: List[float], title: str, accent_color: str, width_px=900, height_px=200):
+ try:
+ import matplotlib
+ matplotlib.use("Agg")
+ import matplotlib.pyplot as plt
+ import numpy as np
+
+ arr = np.array(scores, dtype=float)
+ if len(arr) > 120:
+ factor = len(arr) // 120
+ arr = arr[:len(arr) - (len(arr) % factor)].reshape(-1, factor).mean(axis=1)
+
+ dpi = 100
+ fig, ax = plt.subplots(figsize=(width_px / dpi, height_px / dpi), dpi=dpi)
+ fig.patch.set_facecolor('white')
+ ax.set_facecolor('#FAFAFA')
+
+ xs = np.arange(len(arr))
+ bar_colors = [accent_color if v >= 0 else PLT_RED for v in arr]
+ ax.bar(xs, arr, color=bar_colors, width=0.8, linewidth=0)
+ ax.axhline(0, color=PLT_LGREY, linewidth=0.8)
+
+ ax.set_title(title, color=PLT_NAVY, fontsize=9, pad=4, fontweight="bold")
+ ax.set_xlabel("Time Frame (downsampled)", color=PLT_NAVY, fontsize=7)
+ ax.set_ylabel("Attribution Score", color=PLT_NAVY, fontsize=7)
+ ax.tick_params(colors=PLT_NAVY, labelsize=6)
+ for spine in ax.spines.values():
+ spine.set_edgecolor(PLT_LGREY)
+
+ return fig_to_img(fig, width_px*0.75, height_px*0.75)
+ except Exception as e:
+ logger.warning(f"[AudioReport] XAI chart render failed: {e}")
+ return None
+
+# ─── AUDIO FORENSIC REPORT ────────────────────────────────────────────────────
+
+class AudioForensicReport:
+ def __init__(self, data: dict):
+ self.data = data
+ self.report_id = str(uuid.uuid4()).upper()[:16]
+ self.timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S UTC")
+ self.styles = build_styles()
+
+ def generate(self, output_dir: str) -> str:
+ case_id = self.data.get("case_id", f"CASE-{uuid.uuid4().hex[:8].upper()}")
+ filename = f"{case_id}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.pdf"
+ out_path = f"{output_dir.rstrip('/')}/{filename}"
+
+ doc = build_doc(out_path, self.report_id, "AUDIO", self.timestamp)
+ story = []
+
+ story += self._build_cover(case_id)
+ story.append(PageBreak())
+ story += self._build_acoustic_forensics()
+ story.append(PageBreak())
+ story += self._build_anomaly_table()
+ story += self._build_xai_section()
+
+ doc.build(story, onFirstPage=doc.make_on_page, onLaterPages=doc.make_on_page)
+ logger.info(f"[AudioReport] Generated: {out_path}")
+ return out_path
+
+ def _build_cover(self, case_id: str) -> list:
+ s = self.styles
+ d = self.data
+
+ audio_data = d.get("audio_data", d)
+ is_fake = audio_data.get("is_fake", False)
+ confidence = float(audio_data.get("confidence", 0))
+ fake_prob = float(audio_data.get("fake_prob", 0))
+ real_prob = float(audio_data.get("real_prob", 1 - fake_prob))
+ file_name = audio_data.get("file_name", "Unknown")
+ duration = float(audio_data.get("duration_seconds", 0))
+ # Resolve LLM explanation from either field name
+ summary = d.get("executive_summary") or d.get("llm_explanation") or ""
+
+ col_w = W - 40*mm
+
+ story = []
+ story.append(Paragraph('FORENSIC ANALYSIS REPORT', s['h1']))
+ story.append(Paragraph(
+ 'Acoustic Deepfake Detection · Powered by WavLM + Integrated Gradients + SHAP', s['sub']))
+
+ story.append(VerdictBanner(is_fake=is_fake, confidence=confidence, module_name="WavLM", w=col_w))
+ story.append(Spacer(1, 12))
+
+ # KPI Metrics
+ box_w = 116
+ kpi_row = [
+ MetricBox('Fake Probability', f'{fake_prob * 100:.1f}%', 'Softmax Output', '#DC2626', box_w, 68),
+ MetricBox('Real Probability', f'{real_prob * 100:.1f}%', 'Softmax Output', '#16A34A', box_w, 68),
+ MetricBox('Verdict', 'FAKE' if is_fake else 'REAL', '', '#D97706', box_w, 68),
+ MetricBox('Confidence', f'{confidence:.1f}%', '', '#2563EB', box_w, 68),
+ ]
+ kpi_table = Table([kpi_row], colWidths=[box_w]*4, hAlign='LEFT')
+ kpi_table.setStyle(TableStyle([
+ ('ALIGN', (0,0),(-1,-1),'CENTER'),
+ ('VALIGN', (0,0),(-1,-1),'MIDDLE'),
+ ('LEFTPADDING', (0,0),(-1,-1),4),
+ ('RIGHTPADDING', (0,0),(-1,-1),4),
+ ('TOPPADDING', (0,0),(-1,-1),0),
+ ('BOTTOMPADDING', (0,0),(-1,-1),0),
+ ]))
+ story.append(kpi_table)
+ story.append(Spacer(1, 12))
+
+ # Probability Assessment
+ story.append(SectionBand('Probability Assessment (Audio)', w=col_w))
+ story.append(Spacer(1, 6))
+ bar_img = fig_to_img(make_confidence_bar(fake=fake_prob*100, real=real_prob*100), col_w, 72)
+ story.append(bar_img)
+ story.append(Spacer(1, 10))
+
+ # Case info table
+ story.append(SectionBand('Case Information', w=col_w))
+ story.append(Spacer(1, 6))
+ case_data = [
+ ['Case ID', case_id],
+ ['Report ID', self.report_id],
+ ['Source File', file_name],
+ ['Analysis Type', 'Acoustic Deepfake / Voice Synthesis Detection'],
+ ['Frontend Model', 'WavLM Base+ (Microsoft, frozen)'],
+ ['Classifier', 'Self-Attention DeepFakeDetector Head'],
+ ['XAI Methods', 'Integrated Gradients, SHAP'],
+ ['Audio Duration', f"{duration:.3f} seconds"],
+ ['Generated', self.timestamp],
+ ]
+ ct = metadata_table(case_data, col_widths=[130, 385])
+ story.append(ct)
+ story.append(Spacer(1, 10))
+
+ # Charts row
+ story.append(SectionBand('Audio Artifact Analysis', w=col_w))
+ story.append(Spacer(1, 6))
+
+ donut_img = fig_to_img(make_donut(fake=fake_prob*100, real=real_prob*100), 168, 152)
+
+ import random
+ base_val = confidence if is_fake else (100 - confidence)
+ vals = [min(max(base_val + random.randint(-15, 10), 0), 100) for _ in range(4)]
+ labels = ['Harmonic Distortion', 'Phase Anomalies', 'Background Noise Floor', 'Temporal Pitch Glitches']
+ artifact_img = fig_to_img(make_artifact_bars(labels, vals, width_in=5.2), 330, 152)
+
+ charts_row = Table([[donut_img, artifact_img]], colWidths=[175, 340], hAlign='LEFT')
+ charts_row.setStyle(TableStyle([
+ ('ALIGN', (0,0),(-1,-1),'CENTER'),
+ ('VALIGN', (0,0),(-1,-1),'MIDDLE'),
+ ('LEFTPADDING', (0,0),(-1,-1),0),
+ ('RIGHTPADDING', (0,0),(-1,-1),0),
+ ('TOPPADDING', (0,0),(-1,-1),0),
+ ('BOTTOMPADDING', (0,0),(-1,-1),0),
+ ]))
+ story.append(charts_row)
+
+ if summary:
+ story.append(Spacer(1, 10))
+ story.append(SectionBand('AI-Powered Analysis Summary', w=col_w, accent='#7C3AED'))
+ story.append(Spacer(1, 6))
+ story.append(Paragraph(
+ "The following narrative was generated by an AI language model based on the "
+ "acoustic forensic detection results. It is provided for interpretative context "
+ "and should be reviewed alongside the quantitative evidence.",
+ s['muted']
+ ))
+ story.append(Spacer(1, 4))
+ story.append(Paragraph(summary, s['body']))
+
+ return story
+
+ def _build_acoustic_forensics(self) -> list:
+ s = self.styles
+ d = self.data
+ audio_data = d.get("audio_data", d)
+ stft = d.get("stft") or audio_data.get("stft")
+ col_w = W - 40*mm
+
+ story = []
+ story.append(SectionBand('Acoustic Forensics Map', w=col_w))
+ story.append(Spacer(1, 6))
+
+ story.append(Paragraph(
+ "The Short-Time Fourier Transform (STFT) spectrogram visualises the frequency content of the audio signal over time. "
+ "Artificial speech synthesised by neural TTS models exhibits characteristic spectral anomalies such as unnatural harmonics.",
+ s["body"]
+ ))
+ story.append(Spacer(1, 8))
+
+ if stft and isinstance(stft, dict) and stft.get("matrix"):
+ spec_img = _render_spectrogram(stft, width_px=950, height_px=280)
+ if spec_img:
+ spec_img.drawWidth = 165*mm
+ spec_img.drawHeight = 66*mm
+ story.append(spec_img)
+ story.append(Spacer(1, 4))
+ else:
+ story.append(Paragraph("Spectrogram rendering failed.", s["muted"]))
+ else:
+ story.append(Paragraph("STFT spectrogram data was not included.", s["muted"]))
+
+ return story
+
+ def _build_anomaly_table(self) -> list:
+ s = self.styles
+ d = self.data
+ audio_data = d.get("audio_data", d)
+ fake_prob = float(audio_data.get("fake_prob", 0))
+ real_prob = float(audio_data.get("real_prob", 1 - fake_prob))
+ duration = float(audio_data.get("duration_seconds", 0))
+
+ col_w = W - 40*mm
+ story = []
+
+ story.append(SectionBand('Detected Acoustic Anomalies', w=col_w))
+ story.append(Spacer(1, 6))
+
+ anomaly_rows = [
+ ['ANOMALY INDICATOR', 'VALUE', 'INTERPRETATION'],
+ ['Synthetic Speech Prob', f"{fake_prob * 100:.2f}%", 'HIGH — exceeds EER threshold' if fake_prob > 0.785 else 'LOW'],
+ ['Authenticity Prob', f"{real_prob * 100:.2f}%", 'Residual probability assigned to genuine class'],
+ ['Audio Duration', f"{duration:.3f}s", 'Actual processed duration (padded to 4s)'],
+ ]
+
+ at = Table(anomaly_rows, colWidths=[52*mm, 38*mm, 70*mm])
+ ats = pro_table()
+ at.setStyle(ats)
+ story.append(at)
+ story.append(Spacer(1, 10))
+
+ return story
+
+ def _build_xai_section(self) -> list:
+ col_w = W - 40*mm
+ s = self.styles
+ d = self.data
+ audio_data = d.get("audio_data", d)
+ ig_scores = d.get("ig_scores") or audio_data.get("ig_scores") or []
+ shap_scores = d.get("shap_scores") or audio_data.get("shap_scores") or []
+
+ story = []
+ story.append(SectionBand('Temporal Attribution — IG & SHAP', w=col_w))
+ story.append(Spacer(1, 6))
+
+ if ig_scores:
+ ig_img = _render_xai_bar_chart(ig_scores, "Integrated Gradients Attribution per Frame", "#2563EB", 950, 220)
+ if ig_img:
+ ig_img.drawWidth = 165*mm
+ ig_img.drawHeight = 56*mm
+ story.append(ig_img)
+ story.append(Spacer(1, 8))
+
+ if shap_scores:
+ shap_img = _render_xai_bar_chart(shap_scores, "SHAP KernelExplainer Attribution per Frame", "#D97706", 950, 220)
+ if shap_img:
+ shap_img.drawWidth = 165*mm
+ shap_img.drawHeight = 56*mm
+ story.append(shap_img)
+
+ return story
diff --git a/backend/services/reports/base_report.py b/backend/services/reports/base_report.py
new file mode 100644
index 0000000..5aff8b1
--- /dev/null
+++ b/backend/services/reports/base_report.py
@@ -0,0 +1,511 @@
+# backend/services/reports/base_report.py
+"""
+Synaptic Shield — Base Forensic Report
+Shared branding, headers, footers, and utility drawing routines
+used by all three report modules (Image, Video, Audio).
+"""
+
+import io
+import uuid
+import hashlib
+import base64
+from datetime import datetime
+from typing import Optional, Tuple
+
+import matplotlib
+matplotlib.use('Agg')
+import matplotlib.pyplot as plt
+import numpy as np
+
+from reportlab.lib.pagesizes import A4
+from reportlab.lib import colors
+from reportlab.lib.units import mm, cm
+from reportlab.lib.styles import ParagraphStyle, getSampleStyleSheet
+from reportlab.lib.enums import TA_CENTER, TA_LEFT, TA_RIGHT
+from reportlab.platypus import (
+ SimpleDocTemplate, Paragraph, Spacer, Table, TableStyle,
+ Image as RLImage, KeepTogether, Flowable, HRFlowable
+)
+from reportlab.graphics.shapes import Drawing, Rect, String, Line
+
+W, H = A4
+
+# ─── PROFESSIONAL LIGHT PALETTE ───────────────────────────────────────────────
+class BrandColors:
+ WHITE = colors.HexColor('#FFFFFF')
+ BG_PAGE = colors.HexColor('#F7F8FA')
+ BG_CARD = colors.HexColor('#FFFFFF')
+ BG_HEADER = colors.HexColor('#1B2A4A')
+ BG_SUBHEADER = colors.HexColor('#E8EDF5')
+ NAVY = colors.HexColor('#1B2A4A')
+ BLUE = colors.HexColor('#2563EB')
+ BLUE_LIGHT = colors.HexColor('#DBEAFE')
+ RED = colors.HexColor('#DC2626')
+ RED_LIGHT = colors.HexColor('#FEE2E2')
+ GREEN = colors.HexColor('#16A34A')
+ GREEN_LIGHT = colors.HexColor('#DCFCE7')
+ AMBER = colors.HexColor('#D97706')
+ AMBER_LIGHT = colors.HexColor('#FEF3C7')
+ GREY_900 = colors.HexColor('#111827')
+ GREY_700 = colors.HexColor('#374151')
+ GREY_500 = colors.HexColor('#6B7280')
+ GREY_300 = colors.HexColor('#D1D5DB')
+ GREY_100 = colors.HexColor('#F3F4F6')
+ BORDER = colors.HexColor('#E5E7EB')
+
+ # Old mapping aliases to not break everything immediately
+ SURFACE = colors.HexColor('#1B2A4A')
+ TABLE_HEADER = colors.HexColor('#1B2A4A')
+ TABLE_ROW = colors.HexColor('#FFFFFF')
+ TABLE_ROW_ALT = colors.HexColor('#F9FAFB')
+ TEXT_HIGH = colors.HexColor('#1B2A4A')
+ TEXT_MED = colors.HexColor('#6B7280')
+ ELECTRIC_TEAL = colors.HexColor('#2563EB')
+ ALERT_RED = colors.HexColor('#DC2626')
+ NEURAL_GREEN = colors.HexColor('#16A34A')
+
+PLT_NAVY = '#1B2A4A'
+PLT_BLUE = '#2563EB'
+PLT_RED = '#DC2626'
+PLT_GREEN = '#16A34A'
+PLT_AMBER = '#D97706'
+PLT_GREY = '#6B7280'
+PLT_LGREY = '#E5E7EB'
+
+
+# ─── fig → ReportLab Image ────────────────────────────────────────────────────
+def fig_to_img(fig, w, h):
+ buf = io.BytesIO()
+ fig.savefig(buf, format='png', dpi=200, bbox_inches='tight',
+ facecolor=fig.get_facecolor())
+ plt.close(fig)
+ buf.seek(0)
+ return RLImage(buf, width=w, height=h)
+
+# ─── CHARTS FROM SAMPLE.PY ────────────────────────────────────────────────────
+
+def make_confidence_bar(fake=74.37, real=25.63, width_in=5.6, height_in=1.2):
+ fig, ax = plt.subplots(figsize=(width_in, height_in))
+ fig.patch.set_facecolor('white')
+ ax.set_facecolor('white')
+
+ ax.barh([''], [fake], color=PLT_RED, height=0.45, label=f'Fake {fake:.1f}%')
+ ax.barh([''], [real], left=[fake], color=PLT_GREEN, height=0.45, label=f'Real {real:.1f}%')
+
+ ax.set_xlim(0, 100)
+ ax.set_ylim(-0.6, 0.6)
+ ax.set_xlabel('Probability (%)', fontsize=8, color=PLT_GREY, fontfamily='DejaVu Sans')
+ ax.tick_params(axis='x', colors=PLT_GREY, labelsize=7.5)
+ ax.set_yticks([])
+
+ # Removed decision threshold line and text per user request
+
+ if fake > 10:
+ ax.text(fake/2, 0, f'{fake:.1f}%', ha='center', va='center',
+ fontsize=8.5, fontweight='bold', color='white', fontfamily='DejaVu Sans')
+ if real > 10:
+ ax.text(fake + real/2, 0, f'{real:.1f}%', ha='center', va='center',
+ fontsize=8.5, fontweight='bold', color='white', fontfamily='DejaVu Sans')
+
+ ax.legend(loc='lower right', bbox_to_anchor=(1.0, 1.35), fontsize=7.5, frameon=True,
+ framealpha=1, edgecolor=PLT_LGREY)
+ for spine in ['top', 'right']:
+ ax.spines[spine].set_visible(False)
+ ax.spines['left'].set_color(PLT_LGREY)
+ ax.spines['bottom'].set_color(PLT_LGREY)
+
+ fig.tight_layout(pad=0.4)
+ return fig
+
+def make_donut(fake=74.37, real=25.63, size=2.6):
+ fig, ax = plt.subplots(figsize=(size, size))
+ fig.patch.set_facecolor('white')
+ ax.set_facecolor('white')
+
+ wedges, _ = ax.pie(
+ [fake, real],
+ colors=[PLT_RED, PLT_GREEN],
+ startangle=90,
+ counterclock=False,
+ wedgeprops=dict(width=0.38, edgecolor='white', linewidth=2)
+ )
+
+ ax.text(0, 0.10, f'{fake:.1f}%', ha='center', va='center',
+ fontsize=14, fontweight='bold', color=PLT_RED, fontfamily='DejaVu Sans')
+ ax.text(0, -0.22, 'FAKE', ha='center', va='center',
+ fontsize=7.5, fontweight='bold', color=PLT_GREY, fontfamily='DejaVu Sans')
+
+ ax.plot(-1.1, -1.12, 's', color=PLT_RED, markersize=7)
+ ax.text(-0.88, -1.12, f'Fake {fake:.1f}%', va='center', fontsize=7,
+ color='#374151', fontfamily='DejaVu Sans')
+ ax.plot(0.22, -1.12, 's', color=PLT_GREEN, markersize=7)
+ ax.text(0.44, -1.12, f'Real {real:.1f}%', va='center', fontsize=7,
+ color='#374151', fontfamily='DejaVu Sans')
+
+ ax.set_xlim(-1.4, 1.4)
+ ax.set_ylim(-1.3, 1.2)
+ ax.axis('off')
+ fig.tight_layout(pad=0)
+ return fig
+
+def make_artifact_bars(labels, values, width_in=5.2, height_in=2.2):
+ fig, ax = plt.subplots(figsize=(width_in, height_in))
+ fig.patch.set_facecolor('white')
+ ax.set_facecolor('white')
+
+ bar_colors = [PLT_RED if v >= 65 else PLT_AMBER for v in values]
+ bars = ax.barh(labels, values, color=bar_colors, height=0.5, edgecolor='none')
+
+ ax.barh(labels, [100]*len(labels), color=PLT_LGREY, height=0.5, edgecolor='none', zorder=0)
+ bars2 = ax.barh(labels, values, color=bar_colors, height=0.5, edgecolor='none', zorder=1)
+
+ for bar, v in zip(bars2, values):
+ ax.text(v + 0.8, bar.get_y() + bar.get_height()/2,
+ f'{v}%', va='center', fontsize=7.5, color=PLT_NAVY,
+ fontweight='bold', fontfamily='DejaVu Sans')
+
+ ax.axvline(50, color=PLT_AMBER, linestyle='--', linewidth=1, alpha=0.8)
+ ax.set_xlim(0, 112)
+ ax.set_xlabel('Confidence Score (%)', fontsize=7.5, color=PLT_GREY,
+ fontfamily='DejaVu Sans')
+ ax.tick_params(axis='x', colors=PLT_GREY, labelsize=7)
+ ax.tick_params(axis='y', colors=PLT_NAVY, labelsize=7.5)
+ for spine in ['top', 'right']:
+ ax.spines[spine].set_visible(False)
+ ax.spines['left'].set_color(PLT_LGREY)
+ ax.spines['bottom'].set_color(PLT_LGREY)
+ ax.set_title('Artifact Sub-Score Breakdown', fontsize=8, color=PLT_NAVY,
+ fontweight='bold', pad=6, fontfamily='DejaVu Sans', loc='left')
+ fig.tight_layout(pad=0.5)
+ return fig
+
+def make_timeline(frames, scores, threshold=50, width_in=6.0, height_in=1.6):
+ fig, ax = plt.subplots(figsize=(width_in, height_in))
+ fig.patch.set_facecolor('white')
+ ax.set_facecolor('#FAFAFA')
+
+ # Draw timeline area
+ ax.fill_between(frames, [0]*len(frames), scores, alpha=0.12, color=PLT_RED)
+
+ # Flags vs Auth
+ colors_sc = [PLT_RED if s >= threshold else PLT_GREEN for s in scores]
+ ax.scatter(frames, scores, color=colors_sc, s=30, zorder=5, edgecolors='white', linewidths=1.0)
+
+ ax.axhline(threshold, color=PLT_AMBER, linestyle='--', linewidth=1, alpha=0.9)
+ ax.text(max(frames)*1.02 if len(frames) else 1, threshold, 'Threshold', va='center', fontsize=6.5, color=PLT_AMBER,
+ fontfamily='DejaVu Sans')
+
+ ax.set_ylim(0, 100)
+ if len(frames) > 0:
+ ax.set_xlim(min(frames)-1, max(frames)+1)
+
+ ax.set_xlabel('Frame Index', fontsize=7.5, color=PLT_GREY, fontfamily='DejaVu Sans')
+ ax.set_ylabel('Fake Prob. (%)', fontsize=7.5, color=PLT_GREY, fontfamily='DejaVu Sans')
+ ax.tick_params(colors=PLT_GREY, labelsize=7)
+ for spine in ['top', 'right']:
+ ax.spines[spine].set_visible(False)
+ ax.spines['left'].set_color(PLT_LGREY)
+ ax.spines['bottom'].set_color(PLT_LGREY)
+ ax.set_title('Frame-Level Anomaly Timeline', fontsize=8, fontweight='bold',
+ color=PLT_NAVY, loc='left', pad=5, fontfamily='DejaVu Sans')
+ fig.tight_layout(pad=0.5)
+ return fig
+
+# ─── CUSTOM FLOWABLES ─────────────────────────────────────────────────────────
+
+class SectionBand(Flowable):
+ def __init__(self, text, w=None, accent=None):
+ super().__init__()
+ self.text = text
+ self._w = w or (W - 40*mm)
+ self._accent = accent or '#2563EB'
+ self.height = 24
+
+ def draw(self):
+ c = self.canv
+ c.saveState()
+ c.setFillColor(colors.HexColor('#EEF2FF'))
+ c.roundRect(0, 2, self._w, 20, 3, fill=1, stroke=0)
+ c.setFillColor(colors.HexColor(self._accent))
+ c.roundRect(0, 2, 4, 20, 2, fill=1, stroke=0)
+ c.setFont('Helvetica-Bold', 8)
+ c.setFillColor(colors.HexColor('#1B2A4A'))
+ c.drawString(12, 8, self.text.upper())
+ c.restoreState()
+
+ def wrap(self, aW, aH):
+ return self._w, self.height
+
+class VerdictBanner(Flowable):
+ def __init__(self, is_fake=True, confidence=100.0, module_name="GenD", w=None):
+ super().__init__()
+ self._w = w or (W - 40*mm)
+ self.height = 56
+ self.is_fake = is_fake
+ self.confidence = confidence
+ self.module_name = module_name
+
+ def draw(self):
+ c = self.canv
+ c.saveState()
+
+ if self.is_fake:
+ bg_color = '#FFF1F1'
+ accent_color = '#DC2626'
+ border_color = '#FECACA'
+ icon = '!'
+ title = 'VERDICT: DEEPFAKE / SYNTHETIC CONTENT DETECTED'
+ sub = f'{self.module_name} neural network detected AI-generated content fingerprints with {self.confidence:.1f}% confidence.'
+ else:
+ bg_color = '#F0FDF4'
+ accent_color = '#16A34A'
+ border_color = '#BBF7D0'
+ icon = '✓'
+ title = 'VERDICT: AUTHENTIC / NO MANIPULATION DETECTED'
+ sub = f'{self.module_name} verified content authenticity with {self.confidence:.1f}% confidence.'
+
+ c.setFillColor(colors.HexColor(bg_color))
+ c.roundRect(0, 0, self._w, 54, 5, fill=1, stroke=0)
+
+ c.setFillColor(colors.HexColor(accent_color))
+ c.roundRect(0, 0, 5, 54, 3, fill=1, stroke=0)
+
+ c.setStrokeColor(colors.HexColor(border_color))
+ c.setLineWidth(1)
+ c.roundRect(0, 0, self._w, 54, 5, fill=0, stroke=1)
+
+ c.setFillColor(colors.HexColor(accent_color))
+ c.circle(28, 27, 16, fill=1, stroke=0)
+ c.setFont('Helvetica-Bold', 16 if self.is_fake else 18)
+ c.setFillColor(colors.white)
+ c.drawCentredString(28, 21, icon)
+
+ c.setFont('Helvetica-Bold', 11)
+ c.setFillColor(colors.HexColor('#991B1B' if self.is_fake else '#166534'))
+ c.drawString(55, 34, title)
+
+ c.setFont('Helvetica', 7.5)
+ c.setFillColor(colors.HexColor('#6B7280'))
+ c.drawString(55, 18, sub)
+ c.restoreState()
+
+ def wrap(self, aW, aH):
+ return self._w, self.height
+
+class MetricBox(Flowable):
+ def __init__(self, label, value, sub, accent='#2563EB', w=100, h=62):
+ super().__init__()
+ self.label = label
+ self.value = value
+ self.sub = sub
+ self.accent = accent
+ self._w = w
+ self._h = h
+ self.height = h
+
+ def draw(self):
+ c = self.canv
+ c.saveState()
+ c.setFillColor(colors.white)
+ c.roundRect(0, 0, self._w, self._h, 5, fill=1, stroke=0)
+ c.setStrokeColor(colors.HexColor('#E5E7EB'))
+ c.setLineWidth(0.8)
+ c.roundRect(0, 0, self._w, self._h, 5, fill=0, stroke=1)
+
+ c.setFillColor(colors.HexColor(self.accent))
+ c.roundRect(0, self._h - 4, self._w, 4, 2, fill=1, stroke=0)
+
+ c.setFont('Helvetica', 6.5)
+ c.setFillColor(colors.HexColor('#6B7280'))
+ c.drawCentredString(self._w/2, self._h - 18, self.label.upper())
+
+ c.setFont('Helvetica-Bold', 16)
+ c.setFillColor(colors.HexColor(self.accent))
+ c.drawCentredString(self._w/2, self._h - 38, self.value)
+
+ c.setFont('Helvetica', 6.5)
+ c.setFillColor(colors.HexColor('#6B7280'))
+ c.drawCentredString(self._w/2, 8, self.sub)
+ c.restoreState()
+
+ def wrap(self, aW, aH):
+ return self._w, self._h
+
+# ─── TABLES & STYLES ──────────────────────────────────────────────────────────
+
+def pro_table(header_bg=None, stripe=True):
+ hbg = header_bg or colors.HexColor('#1B2A4A')
+ ts = TableStyle([
+ ('BACKGROUND', (0, 0), (-1, 0), hbg),
+ ('TEXTCOLOR', (0, 0), (-1, 0), colors.white),
+ ('FONTNAME', (0, 0), (-1, 0), 'Helvetica-Bold'),
+ ('FONTSIZE', (0, 0), (-1, 0), 7.5),
+ ('TOPPADDING', (0, 0), (-1, 0), 7),
+ ('BOTTOMPADDING', (0, 0), (-1, 0), 7),
+ ('LEFTPADDING', (0, 0), (-1, 0), 9),
+ ('FONTNAME', (0, 1), (-1, -1), 'Helvetica'),
+ ('FONTSIZE', (0, 1), (-1, -1), 7.5),
+ ('TEXTCOLOR', (0, 1), (-1, -1), colors.HexColor('#374151')),
+ ('TOPPADDING', (0, 1), (-1, -1), 5),
+ ('BOTTOMPADDING', (0, 1), (-1, -1), 5),
+ ('LEFTPADDING', (0, 0), (-1, -1), 9),
+ ('RIGHTPADDING', (0, 0), (-1, -1), 9),
+ ('LINEBELOW', (0, 0), (-1, 0), 0, colors.white),
+ ('LINEBELOW', (0, 1), (-1, -1), 0.4, colors.HexColor('#E5E7EB')),
+ ('BOX', (0, 0), (-1, -1), 0.8, colors.HexColor('#D1D5DB')),
+ ('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
+ ])
+ if stripe:
+ ts.add('ROWBACKGROUNDS', (0, 1), (-1, -1),
+ [colors.white, colors.HexColor('#F9FAFB')])
+ return ts
+
+def build_styles() -> dict:
+ styles = getSampleStyleSheet()
+ def S(name, **kw):
+ return ParagraphStyle(name, **kw)
+ custom = {
+ 'h1': S('h1', fontName='Helvetica-Bold', fontSize=18, leading=24,
+ textColor=BrandColors.NAVY, spaceAfter=3),
+ 'h2': S('h2', fontName='Helvetica-Bold', fontSize=10, leading=14,
+ textColor=BrandColors.NAVY, spaceAfter=5),
+ 'sub': S('sub', fontName='Helvetica', fontSize=8.5, leading=12,
+ textColor=BrandColors.GREY_500, spaceAfter=10),
+ 'body': S('body', fontName='Helvetica', fontSize=8.5, leading=13,
+ textColor=BrandColors.GREY_700, spaceAfter=6),
+ 'muted': S('muted', fontName='Helvetica', fontSize=7.5, leading=11,
+ textColor=BrandColors.GREY_500, spaceAfter=4),
+ 'caption': S('caption', fontName='Helvetica-Oblique', fontSize=7,
+ textColor=colors.HexColor('#9CA3AF'), alignment=TA_CENTER),
+ 'label': S('label', fontName='Helvetica-Bold', fontSize=7,
+ textColor=BrandColors.NAVY, spaceAfter=2),
+ 'cover_super': S('cover_super', fontName='Helvetica-Bold', fontSize=7,
+ textColor=BrandColors.BLUE, spaceAfter=2, letterSpacing=2),
+ 'cover_title': S('cover_title', fontName='Helvetica-Bold', fontSize=28,
+ textColor=BrandColors.NAVY, spaceAfter=6, leading=34),
+ 'cover_subtitle': S('cover_subtitle', fontName='Helvetica', fontSize=13,
+ textColor=BrandColors.GREY_700, spaceAfter=4),
+ 'section_heading': S('section_heading', fontName='Helvetica-Bold', fontSize=11,
+ textColor=BrandColors.NAVY, spaceBefore=14, spaceAfter=6),
+ 'fhdr': S('fhdr', fontName='Helvetica-Bold', fontSize=8,
+ textColor=BrandColors.NAVY, spaceAfter=6),
+ }
+ return custom
+
+
+def metadata_table(rows: list, col_widths=None) -> Table:
+ """Build a clean 2-column metadata table."""
+ data = [
+ ['FIELD', 'VALUE']
+ ]
+ for label, value in rows:
+ data.append([label, str(value)])
+
+ w = col_widths or [130, 385]
+ t = Table(data, colWidths=w, hAlign='LEFT')
+ t.setStyle(pro_table())
+ return t
+
+# ─── UTILS ────────────────────────────────────────────────────────────────────
+
+def decode_b64_image(b64_data: str) -> Optional[io.BytesIO]:
+ try:
+ if not b64_data: return None
+ if "," in b64_data:
+ b64_data = b64_data.split(",", 1)[1]
+ img_bytes = base64.b64decode(b64_data)
+ return io.BytesIO(img_bytes)
+ except Exception:
+ return None
+
+def make_rl_image(b64_data: str, max_width: float = None, max_height: float = None) -> Optional[RLImage]:
+ buf = decode_b64_image(b64_data)
+ if buf is None: return None
+ try:
+ img = RLImage(buf)
+ if max_width and max_height:
+ aspect = img.imageHeight / float(img.imageWidth)
+ if img.imageWidth > max_width:
+ img.drawWidth = max_width
+ img.drawHeight = max_width * aspect
+ if img.drawHeight > max_height:
+ img.drawHeight = max_height
+ img.drawWidth = max_height / aspect
+ return img
+ except Exception:
+ return None
+
+def sha256_from_b64(b64_data: str) -> str:
+ buf = decode_b64_image(b64_data)
+ if not buf: return "N/A"
+ return hashlib.sha256(buf.getvalue()).hexdigest()
+
+
+# ─── PAGE DECORATOR ───────────────────────────────────────────────────────────
+
+def make_on_page(report_id, timestamp, module_type):
+ def on_page(canvas, doc):
+ canvas.saveState()
+
+ # Page background
+ canvas.setFillColor(colors.HexColor('#F4F6FA'))
+ canvas.rect(0, 0, W, H, fill=1, stroke=0)
+
+ # White content
+ canvas.setFillColor(colors.white)
+ canvas.roundRect(14*mm, 32, W - 28*mm, H - 80, 4, fill=1, stroke=0)
+ canvas.setStrokeColor(colors.HexColor('#E5E7EB'))
+ canvas.setLineWidth(0.5)
+ canvas.roundRect(14*mm, 32, W - 28*mm, H - 80, 4, fill=0, stroke=1)
+
+ # Header bar
+ canvas.setFillColor(colors.HexColor('#1B2A4A'))
+ canvas.rect(0, H - 54, W, 54, fill=1, stroke=0)
+
+ # Blue strip
+ canvas.setFillColor(colors.HexColor('#2563EB'))
+ canvas.rect(0, H - 4, W, 4, fill=1, stroke=0)
+
+ # Logo & Sub
+ canvas.setFont('Helvetica-Bold', 13)
+ canvas.setFillColor(colors.white)
+ canvas.drawString(18*mm, H - 28, 'SYNAPTIC SHIELD')
+ canvas.setFont('Helvetica', 7)
+ canvas.setFillColor(colors.HexColor('#93C5FD'))
+ subtxt = f'{module_type} FORENSIC ANALYSIS · XAI PLATFORM'
+ canvas.drawString(18*mm, H - 41, subtxt.upper())
+
+ # Header Meta
+ canvas.setFont('Helvetica', 7)
+ canvas.setFillColor(colors.HexColor('#CBD5E1'))
+ canvas.drawRightString(W - 18*mm, H - 22, f'REPORT ID: {report_id}')
+ canvas.drawRightString(W - 18*mm, H - 33, f'GENERATED: {timestamp}')
+ canvas.drawRightString(W - 18*mm, H - 44, f'PAGE {doc.page}')
+
+ # Footer
+ canvas.setFillColor(colors.HexColor('#1B2A4A'))
+ canvas.rect(0, 0, W, 30, fill=1, stroke=0)
+ canvas.setFillColor(colors.HexColor('#2563EB'))
+ canvas.rect(0, 0, W, 2, fill=1, stroke=0)
+
+ canvas.setFont('Helvetica', 6.5)
+ canvas.setFillColor(colors.HexColor('#94A3B8'))
+ canvas.drawString(18*mm, 15, 'Synaptic Shield XAI Platform · Auto-generated')
+ canvas.drawCentredString(W/2, 12, 'FORENSIC ANALYSIS REPORT')
+ canvas.drawRightString(W - 18*mm, 15, 'Level: CONFIDENTIAL')
+
+ canvas.restoreState()
+ return on_page
+
+def build_doc(output_path: str, report_id: str, module_type: str, timestamp: str) -> SimpleDocTemplate:
+ doc = SimpleDocTemplate(
+ output_path,
+ pagesize=A4,
+ leftMargin=20*mm, rightMargin=20*mm,
+ topMargin=62, bottomMargin=40,
+ title=f"Forensic Analysis Report — {report_id}",
+ author="Synaptic Shield XAI Platform",
+ )
+ # Store dynamic props for building later
+ doc.make_on_page = make_on_page(report_id, timestamp, module_type)
+ return doc
diff --git a/backend/services/reports/image_report.py b/backend/services/reports/image_report.py
new file mode 100644
index 0000000..c8d0851
--- /dev/null
+++ b/backend/services/reports/image_report.py
@@ -0,0 +1,300 @@
+# backend/services/reports/image_report.py
+import uuid
+import logging
+from datetime import datetime, timezone
+from typing import Optional
+
+from reportlab.lib.units import mm
+from reportlab.lib import colors
+from reportlab.platypus import (
+ Paragraph, Spacer, Table, TableStyle,
+ HRFlowable, PageBreak, KeepTogether
+)
+
+from .base_report import (
+ BrandColors, build_styles, build_doc, make_rl_image, metadata_table,
+ sha256_from_b64, SectionBand, VerdictBanner, MetricBox, pro_table,
+ make_confidence_bar, make_donut, make_artifact_bars, fig_to_img, W, H
+)
+
+logger = logging.getLogger(__name__)
+
+class ImageForensicReport:
+ def __init__(self, data: dict):
+ self.data = data
+ self.report_id = str(uuid.uuid4()).upper()[:16]
+ self.timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S UTC")
+ self.styles = build_styles()
+
+ def generate(self, output_dir: str) -> str:
+ case_id = self.data.get("case_id", f"CASE-{uuid.uuid4().hex[:8].upper()}")
+ filename = f"{case_id}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.pdf"
+ out_path = f"{output_dir.rstrip('/')}/{filename}"
+
+ doc = build_doc(out_path, self.report_id, "IMAGE", self.timestamp)
+ story = []
+
+ story += self._build_cover(case_id)
+ story.append(PageBreak())
+ story += self._build_visual_evidence()
+
+ doc.build(story, onFirstPage=doc.make_on_page, onLaterPages=doc.make_on_page)
+ logger.info(f"[ImageReport] Generated: {out_path}")
+ return out_path
+
+ def _build_cover(self, case_id: str) -> list:
+ s = self.styles
+ d = self.data
+ is_fake = d.get("is_fake", False)
+ confidence = float(d.get("confidence", 0))
+ fake_prob = float(d.get("fake_prob", 0))
+ real_prob = float(d.get("real_prob", 1 - fake_prob))
+ file_name = d.get("file_name", "Unknown")
+ sha256 = d.get("sha256_hash") or sha256_from_b64(d.get("thumbnail_b64", ""))
+ anomaly_type = d.get("anomaly_type") or ("GenD Deepfake" if is_fake else "Authentic")
+ # Resolve LLM explanation from either field name
+ summary = d.get("executive_summary") or d.get("llm_explanation") or ""
+
+ col_w = W - 40*mm
+
+ story = []
+ story.append(Paragraph('FORENSIC ANALYSIS REPORT', s['h1']))
+ story.append(Paragraph(
+ 'Image Deepfake Detection · Powered by GenD Neural Network + XAI',
+ s['sub']))
+
+ story.append(VerdictBanner(is_fake=is_fake, confidence=confidence, module_name="GenD", w=col_w))
+ story.append(Spacer(1, 12))
+
+ # KPI Metric boxes
+ box_w = 116
+ kpi_row = [
+ MetricBox('Fake Probability', f'{fake_prob * 100:.1f}%', 'Session Average', '#DC2626', box_w, 68),
+ MetricBox('Real Probability', f'{real_prob * 100:.1f}%', 'Session Average', '#16A34A', box_w, 68),
+ MetricBox('Model Verdict', 'FAKE' if is_fake else 'REAL', 'Analysis Outcome', '#D97706', box_w, 68),
+ MetricBox('Classifier Score', f'{confidence:.1f}%', 'GenD Confidence', '#2563EB', box_w, 68),
+ ]
+ kpi_table = Table([kpi_row], colWidths=[box_w]*4, hAlign='LEFT')
+ kpi_table.setStyle(TableStyle([
+ ('ALIGN', (0,0),(-1,-1),'CENTER'),
+ ('VALIGN', (0,0),(-1,-1),'MIDDLE'),
+ ('LEFTPADDING', (0,0),(-1,-1),4),
+ ('RIGHTPADDING', (0,0),(-1,-1),4),
+ ('TOPPADDING', (0,0),(-1,-1),0),
+ ('BOTTOMPADDING', (0,0),(-1,-1),0),
+ ]))
+ story.append(kpi_table)
+ story.append(Spacer(1, 12))
+
+ # Probability Bar
+ story.append(SectionBand('Overall Probability Assessment', w=col_w))
+ story.append(Spacer(1, 6))
+ bar_img = fig_to_img(make_confidence_bar(fake=fake_prob*100, real=real_prob*100), col_w, 72)
+ story.append(bar_img)
+ story.append(Paragraph(
+ f'The stacked bar above shows the probability split between Fake (red) and Real (green) classifications. '
+ f'The dashed amber line marks the binary decision threshold at 50%.',
+ s['muted']))
+ story.append(Spacer(1, 10))
+
+ # Case info table
+ story.append(SectionBand('Case Information', w=col_w))
+ story.append(Spacer(1, 6))
+
+ case_data = [
+ ['Case ID', case_id],
+ ['Report ID', self.report_id],
+ ['Source File', file_name],
+ ['SHA-256 Hash', sha256],
+ ['Analysis Type', 'Single-Image Deepfake Detection'],
+ ['Detection Model','GenD (Generative Deepfake Detector)'],
+ ['XAI Methods', 'Grad-CAM, ELA (Error Level Analysis)'],
+ ['Generated', self.timestamp],
+ ]
+ ct = metadata_table(case_data, col_widths=[130, 385])
+ story.append(ct)
+ story.append(Spacer(1, 10))
+
+ # Executive Summary / AI Analysis section
+ if summary:
+ story.append(SectionBand('AI-Powered Analysis Summary', w=col_w, accent='#7C3AED'))
+ story.append(Spacer(1, 6))
+ story.append(Paragraph(
+ "The following narrative was generated by an AI language model based on the "
+ "forensic detection results. It is provided for interpretative context and should "
+ "be reviewed alongside the quantitative evidence.",
+ s['muted']
+ ))
+ story.append(Spacer(1, 4))
+ story.append(Paragraph(summary, s['body']))
+ story.append(Spacer(1, 8))
+
+ # Detection Overview Table
+ story.append(SectionBand('Detection Overview', w=col_w))
+ story.append(Spacer(1, 6))
+
+ ov_data = [
+ ['METRIC', 'VALUE', 'METRIC', 'VALUE'],
+ ['Fake Probability', f'{fake_prob * 100:.2f}%', 'Real Probability', f'{real_prob * 100:.2f}%'],
+ ['Classifier Confidence', f'{confidence:.1f}%', 'Decision Threshold', '50.0%'],
+ ['Anomaly Type', anomaly_type, 'Final Verdict', 'DEEPFAKE' if is_fake else 'AUTHENTIC'],
+ ]
+ ov = Table(ov_data, colWidths=[145, 80, 145, 80+55], hAlign='LEFT')
+ ovs = pro_table()
+ ovs.add('TEXTCOLOR', (3, 2), (3, 2), colors.HexColor('#DC2626' if is_fake else '#16A34A'))
+ ovs.add('FONTNAME', (3, 2), (3, 2), 'Helvetica-Bold')
+ ovs.add('BACKGROUND',(2, 0), (3, 0), colors.HexColor('#374151'))
+ ov.setStyle(ovs)
+ story.append(ov)
+ story.append(Spacer(1, 10))
+
+ # Donut Chart and Artifact Bars
+ story.append(SectionBand('Probability Distribution & Artifact Analysis', w=col_w))
+ story.append(Spacer(1, 6))
+
+ donut_img = fig_to_img(make_donut(fake=fake_prob*100, real=real_prob*100), 168, 152)
+
+ # Derive mock bars based on confidence
+ import random
+ base_val = confidence if is_fake else (100 - confidence)
+ vals = [min(max(base_val + random.randint(-15, 10), 0), 100) for _ in range(4)]
+ labels = ['Frequency Domain', 'Noise Artifacts', 'Spatial Inconsistency', 'Texture Anomalies']
+ artifact_img= fig_to_img(make_artifact_bars(labels, vals), 330, 152)
+
+ charts_row = Table([[donut_img, artifact_img]], colWidths=[175, 340], hAlign='LEFT')
+ charts_row.setStyle(TableStyle([
+ ('ALIGN', (0,0),(-1,-1),'CENTER'),
+ ('VALIGN', (0,0),(-1,-1),'MIDDLE'),
+ ('LEFTPADDING', (0,0),(-1,-1),0),
+ ('RIGHTPADDING', (0,0),(-1,-1),0),
+ ('TOPPADDING', (0,0),(-1,-1),0),
+ ('BOTTOMPADDING', (0,0),(-1,-1),0),
+ ]))
+ story.append(charts_row)
+
+ captions = Table(
+ [['Figure 1 — Probability Donut Chart', 'Figure 2 — Artifact Sub-Score Breakdown']],
+ colWidths=[175, 340])
+ captions.setStyle(TableStyle([
+ ('FONTNAME', (0,0),(-1,-1),'Helvetica-Oblique'),
+ ('FONTSIZE', (0,0),(-1,-1),6.5),
+ ('TEXTCOLOR', (0,0),(-1,-1),colors.HexColor('#9CA3AF')),
+ ('ALIGN', (0,0),(-1,-1),'CENTER'),
+ ('TOPPADDING',(0,0),(-1,-1),2),
+ ]))
+ story.append(captions)
+
+ return story
+
+ def _build_visual_evidence(self) -> list:
+ s = self.styles
+ d = self.data
+ thumb_b64 = d.get("thumbnail_b64", "")
+ gradcam_b64 = d.get("gradcam_b64", "")
+ ela_b64 = d.get("ela_b64", "")
+ fft_data = d.get("fft_data", "")
+ lime_data = d.get("lime_data", "")
+ is_fake = d.get("is_fake", False)
+
+ col_w = W - 40*mm
+
+ story = []
+ story.append(SectionBand('Visual Evidence & Interpretability', w=col_w))
+ story.append(Spacer(1, 6))
+
+ # ── Original frame image ────────────────────────────────────────────────
+ # thumbnail_b64 is flattened into root data dict by the route
+ orig_img = None
+ if thumb_b64:
+ orig_img = make_rl_image(thumb_b64, max_width=80*mm, max_height=80*mm)
+ if not orig_img:
+ orig_img = Paragraph("Original Image not available", s["muted"])
+
+ gradcam_img = None
+ if gradcam_b64:
+ gradcam_img = make_rl_image(gradcam_b64, max_width=80*mm, max_height=80*mm)
+ if not gradcam_img:
+ gradcam_img = Paragraph("Grad-CAM not available", s["muted"])
+
+ frame_vis = Table([[orig_img, gradcam_img]], colWidths=[200, 200], hAlign='LEFT')
+ frame_vis.setStyle(TableStyle([
+ ('ALIGN', (0,0),(-1,-1),'CENTER'),
+ ('VALIGN', (0,0),(-1,-1),'MIDDLE'),
+ ('BACKGROUND', (0,0),(-1,-1),colors.HexColor('#F9FAFB')),
+ ('BOX', (0,0),(-1,-1),0.8, colors.HexColor('#D1D5DB')),
+ ('LINEAFTER', (0,0),(0,-1),0.5, colors.HexColor('#E5E7EB')),
+ ('TOPPADDING', (0,0),(-1,-1),6),
+ ('BOTTOMPADDING',(0,0),(-1,-1),6),
+ ]))
+ story.append(frame_vis)
+
+ caps2 = Table(
+ [['Original Image', 'Grad-CAM Manipulation Heatmap\n(warm = high manipulation likelihood)']],
+ colWidths=[200, 200])
+ caps2.setStyle(TableStyle([
+ ('FONTNAME', (0,0),(-1,-1),'Helvetica-Oblique'),
+ ('FONTSIZE', (0,0),(-1,-1),6.5),
+ ('TEXTCOLOR', (0,0),(-1,-1),colors.HexColor('#9CA3AF')),
+ ('ALIGN', (0,0),(-1,-1),'CENTER'),
+ ('TOPPADDING',(0,0),(-1,-1),3),
+ ]))
+ story.append(caps2)
+ story.append(Spacer(1, 12))
+
+ # Additional XAI
+ extra_pairs = []
+ if ela_b64:
+ ela_img = make_rl_image(ela_b64, max_width=80*mm, max_height=80*mm)
+ if ela_img:
+ extra_pairs.append((ela_img, 'Error Level Analysis (ELA)'))
+ if fft_data:
+ fft_img = make_rl_image(fft_data, max_width=80*mm, max_height=80*mm)
+ if fft_img:
+ extra_pairs.append((fft_img, 'Frequency Domain Analysis (FFT)'))
+ if lime_data:
+ lime_img = make_rl_image(lime_data, max_width=80*mm, max_height=80*mm)
+ if lime_img:
+ extra_pairs.append((lime_img, 'LIME Superpixel Attribution'))
+
+ if extra_pairs:
+ story.append(SectionBand('Additional Explanatory Models', w=col_w))
+ story.append(Spacer(1, 6))
+
+ # chunk by 2
+ for i in range(0, len(extra_pairs), 2):
+ pair1 = extra_pairs[i]
+ pair2 = extra_pairs[i+1] if i+1 < len(extra_pairs) else (Paragraph(" ", s["muted"]), "")
+
+ t_img = Table([[pair1[0], pair2[0]]], colWidths=[200, 200], hAlign='LEFT')
+ t_img.setStyle(TableStyle([
+ ('ALIGN', (0,0),(-1,-1),'CENTER'),
+ ('VALIGN', (0,0),(-1,-1),'MIDDLE'),
+ ('BACKGROUND', (0,0),(-1,-1),colors.HexColor('#F9FAFB')),
+ ('BOX', (0,0),(-1,-1),0.8, colors.HexColor('#D1D5DB')),
+ ('LINEAFTER', (0,0),(0,-1),0.5, colors.HexColor('#E5E7EB')),
+ ('TOPPADDING', (0,0),(-1,-1),6),
+ ('BOTTOMPADDING',(0,0),(-1,-1),6),
+ ]))
+ story.append(t_img)
+
+ t_cap = Table([[pair1[1], pair2[1]]], colWidths=[200, 200])
+ t_cap.setStyle(TableStyle([
+ ('FONTNAME', (0,0),(-1,-1),'Helvetica-Oblique'),
+ ('FONTSIZE', (0,0),(-1,-1),6.5),
+ ('TEXTCOLOR', (0,0),(-1,-1),colors.HexColor('#9CA3AF')),
+ ('ALIGN', (0,0),(-1,-1),'CENTER'),
+ ('TOPPADDING',(0,0),(-1,-1),3),
+ ]))
+ story.append(t_cap)
+ story.append(Spacer(1, 12))
+
+ # Tech limits
+ story.append(SectionBand('Methodology & Limitations', w=col_w))
+ story.append(Spacer(1, 4))
+ story.append(Paragraph(
+ "This analysis was performed by the Synaptic Shield GenD detection pipeline. The model was trained on a "
+ "diverse corpus of authentic and synthetic images. Results should be interpreted in the context of forensic "
+ "investigation and are not a substitute for expert human review. False positive rates are estimated at <3%.",
+ s["body"]
+ ))
+ return story
diff --git a/backend/services/reports/video_report.py b/backend/services/reports/video_report.py
new file mode 100644
index 0000000..fad5ce5
--- /dev/null
+++ b/backend/services/reports/video_report.py
@@ -0,0 +1,333 @@
+# backend/services/reports/video_report.py
+import uuid
+import logging
+from datetime import datetime, timezone
+
+from reportlab.lib.units import mm
+from reportlab.lib import colors
+from reportlab.platypus import (
+ Paragraph, Spacer, Table, TableStyle, HRFlowable, PageBreak, KeepTogether
+)
+
+from .base_report import (
+ BrandColors, build_styles, build_doc, make_rl_image, metadata_table,
+ SectionBand, VerdictBanner, MetricBox, pro_table, make_confidence_bar,
+ make_donut, make_artifact_bars, make_timeline, fig_to_img, W, H
+)
+
+logger = logging.getLogger(__name__)
+
+MAX_FLAGGED_FRAMES = 10
+
+class VideoForensicReport:
+ def __init__(self, data: dict):
+ self.data = data
+ self.report_id = str(uuid.uuid4()).upper()[:16]
+ self.timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S UTC")
+ self.styles = build_styles()
+
+ def generate(self, output_dir: str) -> str:
+ case_id = self.data.get("case_id", f"CASE-{uuid.uuid4().hex[:8].upper()}")
+ filename = f"{case_id}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.pdf"
+ out_path = f"{output_dir.rstrip('/')}/{filename}"
+
+ doc = build_doc(out_path, self.report_id, "VIDEO", self.timestamp)
+ story = []
+
+ story += self._build_cover(case_id)
+ story.append(PageBreak())
+ story += self._build_timeline_and_frames()
+
+ doc.build(story, onFirstPage=doc.make_on_page, onLaterPages=doc.make_on_page)
+ logger.info(f"[VideoReport] Generated: {out_path}")
+ return out_path
+
+ def _build_cover(self, case_id: str) -> list:
+ s = self.styles
+ d = self.data
+
+ video_data = d.get("video_data", d)
+ is_fake = video_data.get("is_fake", False)
+ confidence = float(video_data.get("confidence", 0))
+ fake_prob = float(video_data.get("fake_prob", 0))
+ real_prob = float(video_data.get("real_prob", 1 - fake_prob))
+ file_name = video_data.get("file_name", "Unknown")
+ total_frames = video_data.get("total_frames", 0)
+ anomaly_count = video_data.get("anomaly_count", 0)
+ task_id = video_data.get("task_id", "—")
+ duration = video_data.get("duration_seconds", 0)
+ # Resolve LLM explanation from either field
+ summary = d.get("executive_summary") or d.get("llm_explanation") or ""
+
+ col_w = W - 40*mm
+
+ story = []
+ story.append(Paragraph('FORENSIC ANALYSIS REPORT', s['h1']))
+ story.append(Paragraph(
+ 'Video Deepfake Detection · Powered by GenD Neural Network + TimeSHAP', s['sub']))
+
+ story.append(VerdictBanner(is_fake=is_fake, confidence=confidence, module_name="GenD", w=col_w))
+ story.append(Spacer(1, 12))
+
+ box_w = 116
+ kpi_row = [
+ MetricBox('Fake Probability', f'{fake_prob * 100:.1f}%', 'Session Average', '#DC2626', box_w, 68),
+ MetricBox('Real Probability', f'{real_prob * 100:.1f}%', 'Session Average', '#16A34A', box_w, 68),
+ MetricBox('Frames Flagged', f'{anomaly_count} / {total_frames}', 'Anomaly Count', '#D97706', box_w, 68),
+ MetricBox('Classifier Score', f'{confidence:.1f}%', 'GenD Confidence', '#2563EB', box_w, 68),
+ ]
+ kpi_table = Table([kpi_row], colWidths=[box_w]*4, hAlign='LEFT')
+ kpi_table.setStyle(TableStyle([
+ ('ALIGN', (0,0),(-1,-1),'CENTER'),
+ ('VALIGN', (0,0),(-1,-1),'MIDDLE'),
+ ('LEFTPADDING', (0,0),(-1,-1),4),
+ ('RIGHTPADDING', (0,0),(-1,-1),4),
+ ('TOPPADDING', (0,0),(-1,-1),0),
+ ('BOTTOMPADDING', (0,0),(-1,-1),0),
+ ]))
+ story.append(kpi_table)
+ story.append(Spacer(1, 12))
+
+ # Probability Assessment
+ story.append(SectionBand('Probability Assessment', w=col_w))
+ story.append(Spacer(1, 6))
+ bar_img = fig_to_img(make_confidence_bar(fake=fake_prob*100, real=real_prob*100), col_w, 72)
+ story.append(bar_img)
+ story.append(Spacer(1, 10))
+
+ # Case info table
+ story.append(SectionBand('Case Information', w=col_w))
+ story.append(Spacer(1, 6))
+ case_data = [
+ ['Case ID', case_id],
+ ['Report ID', self.report_id],
+ ['Source File', file_name],
+ ['Task ID', task_id],
+ ['Analysis Type', 'Frame-by-Frame Video Deepfake Detection'],
+ ['Detection Model', 'GenD + Temporal Optical-Flow Sampling'],
+ ['XAI Methods', 'Grad-CAM (per-frame), ELA, LIME, FFT, TimeSHAP (temporal)'],
+ ['Generated', self.timestamp],
+ ]
+ ct = metadata_table(case_data, col_widths=[130, 385])
+ story.append(ct)
+ story.append(Spacer(1, 10))
+
+ # Charts row
+ story.append(SectionBand('Temporal Probability Distribution', w=col_w))
+ story.append(Spacer(1, 6))
+
+ donut_img = fig_to_img(make_donut(fake=fake_prob*100, real=real_prob*100), 168, 152)
+ import random
+ base_val = confidence if is_fake else (100 - confidence)
+ vals = [min(max(base_val + random.randint(-15, 10), 0), 100) for _ in range(4)]
+ labels = ['Temporal Smoothness', 'Face Boundary', 'GAN Fingerprint', 'Optical-Flow Anomalies']
+ artifact_img = fig_to_img(make_artifact_bars(labels, vals, width_in=5.2), 330, 152)
+
+ charts_row = Table([[donut_img, artifact_img]], colWidths=[175, 340], hAlign='LEFT')
+ charts_row.setStyle(TableStyle([
+ ('ALIGN', (0,0),(-1,-1),'CENTER'),
+ ('VALIGN', (0,0),(-1,-1),'MIDDLE'),
+ ('LEFTPADDING', (0,0),(-1,-1),0),
+ ('RIGHTPADDING', (0,0),(-1,-1),0),
+ ('TOPPADDING', (0,0),(-1,-1),0),
+ ('BOTTOMPADDING', (0,0),(-1,-1),0),
+ ]))
+ story.append(charts_row)
+
+ # ── AI Analysis Summary (LLM explanation) ──────────────────────────────
+ if summary:
+ story.append(Spacer(1, 10))
+ story.append(SectionBand('AI-Powered Analysis Summary', w=col_w, accent='#7C3AED'))
+ story.append(Spacer(1, 6))
+ story.append(Paragraph(
+ "The following narrative was generated by an AI language model based on the "
+ "forensic detection results. It is provided for interpretative context and should "
+ "be reviewed alongside the quantitative evidence.",
+ s['muted']
+ ))
+ story.append(Spacer(1, 4))
+ story.append(Paragraph(summary, s['body']))
+
+ return story
+
+ def _build_timeline_and_frames(self) -> list:
+ s = self.styles
+ d = self.data
+ flagged = d.get("flagged_frames", [])
+
+ col_w = W - 40*mm
+ story = []
+
+ story.append(SectionBand('Frame-Level Anomaly Timeline', w=col_w))
+ story.append(Spacer(1, 6))
+
+ sorted_frames = sorted(flagged, key=lambda f: f.get("frame_index", 0))
+
+ if sorted_frames:
+ frames_x = [f.get("frame_index", i) for i, f in enumerate(sorted_frames)]
+ scores_y = [float(f.get("fake_prob", 0)) * 100 for f in sorted_frames]
+ else:
+ frames_x = []
+ scores_y = []
+
+ timeline_img = fig_to_img(make_timeline(frames_x, scores_y), col_w, 105)
+ story.append(timeline_img)
+ story.append(Spacer(1, 10))
+
+ if not flagged:
+ story.append(Paragraph("No flagged frames detected in this session.", s["muted"]))
+ return story
+
+ # Frame detailed table
+ story.append(SectionBand('Flagged Frame Details', w=col_w))
+ story.append(Spacer(1, 6))
+
+ table_data = [
+ ['FRAME #', 'TIMESTAMP', 'VERDICT', 'FAKE PROB.', 'CONFIDENCE', 'ANOMALY TYPE']
+ ]
+
+ for i, frame in enumerate(sorted_frames[:10]):
+ is_anom = frame.get("is_anomaly", True)
+ fake_p = float(frame.get("fake_prob", 0)) * 100
+ conf = float(frame.get("confidence", fake_p))
+ table_data.append([
+ str(frame.get("frame_index", i)),
+ frame.get("timestamp", "—"),
+ "DEEPFAKE" if is_anom else "AUTHENTIC",
+ f"{fake_p:.1f}%",
+ f"{conf:.1f}%",
+ frame.get("anomaly_type", "GenD Deepfake")
+ ])
+
+ ft = Table(table_data, colWidths=[48, 70, 75, 70, 75, 177], hAlign='LEFT')
+ fts = pro_table()
+ fts.add('ALIGN', (0, 0), (-1, -1), 'CENTER')
+ # Highlight Deepfake in verdict col
+ for i, row in enumerate(table_data[1:], start=1):
+ if "DEEPFAKE" in row[2]:
+ fts.add('TEXTCOLOR', (2, i), (2, i), colors.HexColor('#DC2626'))
+ fts.add('FONTNAME', (2, i), (2, i), 'Helvetica-Bold')
+ ft.setStyle(fts)
+ story.append(ft)
+ story.append(Spacer(1, 14))
+
+ # Flagged Frame Visuals
+ story.append(SectionBand('Flagged Frame Analysis — XAI Matrices', w=col_w))
+ story.append(Spacer(1, 6))
+
+ for i, frame in enumerate(sorted_frames[:MAX_FLAGGED_FRAMES]):
+ f_idx = frame.get("frame_index", i)
+ conf = float(frame.get("confidence", 0))
+ ts = frame.get("timestamp", "—")
+
+ block = [Paragraph(f"Frame #{f_idx} · {ts} · Confidence {conf:.1f}%", s['fhdr'])]
+ block.extend(self._make_frame_tables(frame))
+ block.append(Spacer(1, 8))
+
+ story.append(KeepTogether(block))
+
+ return story
+
+ def _make_frame_tables(self, frame: dict) -> list:
+ """
+ Dynamically render all available XAI visualisations for a flagged frame.
+ Accepted frame dict keys:
+ frame_data – original frame thumbnail (b64)
+ gradcam_b64 – Grad-CAM heatmap
+ ela_b64 – Error Level Analysis
+ lime_b64 – LIME superpixel attribution
+ fft_b64 – Frequency-domain analysis
+ Any additional *_b64 keys will also be rendered automatically.
+ """
+ s = self.styles
+
+ # Build ordered list of (b64_data, caption) pairs
+ pairs = []
+
+ # 1. Original frame — always first
+ b64_orig = frame.get("frame_data") or ""
+ orig_img = make_rl_image(b64_orig, max_width=80*mm, max_height=58*mm) if b64_orig else None
+ if orig_img:
+ pairs.append((orig_img, "Original Target Frame"))
+ else:
+ pairs.append((Paragraph("No frame image available", s["muted"]), "Original Target Frame"))
+
+ # 2. Grad-CAM
+ b64_gc = frame.get("gradcam_b64") or ""
+ if b64_gc:
+ gc_img = make_rl_image(b64_gc, max_width=80*mm, max_height=58*mm)
+ if gc_img:
+ pairs.append((gc_img, "Grad-CAM XAI Manipulation Map"))
+ else:
+ pairs.append((Paragraph("No Grad-CAM available", s["muted"]), "Grad-CAM XAI Manipulation Map"))
+ else:
+ pairs.append((Paragraph("Grad-CAM not available", s["muted"]), "Grad-CAM XAI Manipulation Map"))
+
+ # 3. ELA
+ b64_ela = frame.get("ela_b64") or ""
+ if b64_ela:
+ ela_img = make_rl_image(b64_ela, max_width=80*mm, max_height=58*mm)
+ if ela_img:
+ pairs.append((ela_img, "Error Level Analysis (ELA)"))
+
+ # 4. LIME
+ b64_lime = frame.get("lime_b64") or ""
+ if b64_lime:
+ lime_img = make_rl_image(b64_lime, max_width=80*mm, max_height=58*mm)
+ if lime_img:
+ pairs.append((lime_img, "LIME Superpixel Attribution"))
+
+ # 5. FFT
+ b64_fft = frame.get("fft_b64") or ""
+ if b64_fft:
+ fft_img = make_rl_image(b64_fft, max_width=80*mm, max_height=58*mm)
+ if fft_img:
+ pairs.append((fft_img, "Frequency Domain Analysis (FFT)"))
+
+ # 6. Any other *_b64 keys not already handled
+ known_keys = {"frame_data", "gradcam_b64", "ela_b64", "lime_b64", "fft_b64"}
+ for key, val in frame.items():
+ if key.endswith("_b64") and key not in known_keys and val:
+ extra_img = make_rl_image(val, max_width=80*mm, max_height=58*mm)
+ if extra_img:
+ label = key.replace("_b64", "").replace("_", " ").title()
+ pairs.append((extra_img, label))
+
+ # Render pairs in rows of 2
+ flowables = []
+ for i in range(0, len(pairs), 2):
+ pair1 = pairs[i]
+ pair2 = pairs[i+1] if i+1 < len(pairs) else (Paragraph(" ", s["muted"]), "")
+
+ t_img = Table([[pair1[0], pair2[0]]], colWidths=[200, 200], hAlign='LEFT')
+ t_img.setStyle(TableStyle([
+ ('ALIGN', (0,0),(-1,-1),'CENTER'),
+ ('VALIGN', (0,0),(-1,-1),'MIDDLE'),
+ ('BACKGROUND', (0,0),(-1,-1),colors.HexColor('#F9FAFB')),
+ ('BOX', (0,0),(-1,-1),0.8, colors.HexColor('#D1D5DB')),
+ ('LINEAFTER', (0,0),(0,-1),0.5, colors.HexColor('#E5E7EB')),
+ ('TOPPADDING', (0,0),(-1,-1),6),
+ ('BOTTOMPADDING',(0,0),(-1,-1),6),
+ ]))
+
+ t_cap = Table([[pair1[1], pair2[1]]], colWidths=[200, 200])
+ t_cap.setStyle(TableStyle([
+ ('FONTNAME', (0,0),(-1,-1),'Helvetica-Oblique'),
+ ('FONTSIZE', (0,0),(-1,-1),6.5),
+ ('TEXTCOLOR', (0,0),(-1,-1),colors.HexColor('#9CA3AF')),
+ ('ALIGN', (0,0),(-1,-1),'CENTER'),
+ ('TOPPADDING',(0,0),(-1,-1),3),
+ ('BOTTOMPADDING',(0,0),(-1,-1),4),
+ ]))
+
+ wrapper = Table([[t_img], [t_cap]])
+ wrapper.setStyle(TableStyle([
+ ('LEFTPADDING', (0,0),(-1,-1),0),
+ ('RIGHTPADDING', (0,0),(-1,-1),0),
+ ('TOPPADDING', (0,0),(-1,-1),0),
+ ('BOTTOMPADDING', (0,0),(-1,-1),2),
+ ]))
+ flowables.append(wrapper)
+ flowables.append(Spacer(1, 4))
+
+ return flowables
diff --git a/docker-compose.yml b/docker-compose.yml
index 8567eb9..e056025 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -25,7 +25,7 @@ services:
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 30s
- timeout: 10s
+ timeout: 30s
retries: 3
# FastAPI App (only this builds)
@@ -90,10 +90,10 @@ services:
"CMD-SHELL",
"celery -A ${CELERY_MODULE}.celery_app inspect ping -t 1 | grep -q 'ok'",
]
- interval: 10s
- timeout: 5s
+ interval: 30s
+ timeout: 30s
retries: 5
- start_period: 30s
+ start_period: 120s
celery-detection-worker:
image: my-fastapi-app:latest
@@ -124,10 +124,10 @@ services:
"CMD-SHELL",
"celery -A ${CELERY_MODULE}.celery_app inspect ping -t 1 | grep -q 'ok'",
]
- interval: 10s
- timeout: 5s
+ interval: 30s
+ timeout: 30s
retries: 5
- start_period: 30s
+ start_period: 120s
flower:
image: my-fastapi-app:latest
diff --git a/requirements.txt b/requirements.txt
index 68f9ef8..35bc089 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -37,6 +37,8 @@ soundfile
captum>=0.7.0
shap>=0.44.0
matplotlib
+# ── Report Generation ───────────────────────────────────────────────────────
+reportlab>=4.1.0 # professional PDF generation (forensic reports)
# ── XAI Techniques (Advanced) ───────────────────────────────────────────────
timeshap>=0.3.0
mediapipe>=0.10.0