Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
70 changes: 70 additions & 0 deletions examples/studio_demo/jetson_power_mode_summary.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
{
"summary_role": "jetson-power-mode-comparison",
"schema_version": "inferedge-jetson-power-mode-summary-v1",
"model": "yolov8n",
"backend_key": "tensorrt__jetson",
"precision": "fp16",
"compare_key": "yolov8n__b1__h640w640__fp16",
"baseline_power_mode": "25W",
"candidate_power_mode": "15W",
"baseline_source": "examples/studio_demo/tensorrt_jetson_25w_result.json",
"candidate_source": "examples/studio_demo/tensorrt_jetson_15w_result.json",
"comparison_scope": "same TensorRT FP16 engine, same 640x640 dummy input, same warmup/runs, different Jetson power mode",
"run_config_status": "power_mode_changed",
"run_config_note": "Power mode differs, so this is system evidence for deployment review rather than a same-run_config latency regression test.",
"metrics": {
"mean_ms": {
"baseline_25w": 10.066401,
"candidate_15w": 10.799106,
"delta_ms": 0.732705,
"delta_pct": 7.278719
},
"p95_ms": {
"baseline_25w": 15.476641,
"candidate_15w": 15.43869,
"delta_ms": -0.037951,
"delta_pct": -0.245215
},
"p99_ms": {
"baseline_25w": 15.548438,
"candidate_15w": 15.529218,
"delta_ms": -0.01922,
"delta_pct": -0.123614
},
"fps_value": {
"baseline_25w": 99.340373,
"candidate_15w": 92.600262,
"delta": -6.740111,
"delta_pct": -6.784866
}
},
"tegrastats": {
"baseline_25w": {
"status": "parsed",
"sample_count": 3,
"max_temp_c": 40.656,
"max_temp_name": "gpu",
"vdd_in_mw_avg": 4863.0,
"vdd_in_mw_max": 5827.0
},
"candidate_15w": {
"status": "parsed",
"sample_count": 10,
"max_temp_c": 42.437,
"max_temp_name": "gpu",
"vdd_in_mw_avg": 5707.8,
"vdd_in_mw_max": 7120.0
}
},
"deployment_signal": {
"decision": "review_note",
"reason": "power mode changed; compare as Jetson system evidence",
"recommended_action": "Record power_mode, p95/p99 latency, FPS, and tegrastats summary with the deployment evidence."
},
"notes": [
"This is a short Runtime smoke, not a sustained thermal benchmark.",
"Power readings are supporting evidence from the captured tegrastats samples.",
"AIGuard remains optional; Lab owns the final deployment decision."
],
"created_at": "2026-05-04T17:20:00Z"
}
146 changes: 146 additions & 0 deletions examples/studio_demo/tensorrt_jetson_15w_result.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,146 @@
{
"schema_version": "inferedge-runtime-result-v1",
"compare_key": "yolov8n__b1__h640w640__fp16",
"backend_key": "tensorrt__jetson",
"runtime_role": "runtime-result",
"manifest_path": "/home/risenano01/InferEdgeForge/builds/yolov8n__jetson__tensorrt__jetson_fp16/metadata.json",
"manifest_applied": true,
"model_name": "model.engine",
"model_path": "/home/risenano01/InferEdgeForge/builds/yolov8n__jetson__tensorrt__jetson_fp16/model.engine",
"engine_name": "tensorrt",
"engine_backend": "tensorrt",
"device_name": "jetson",
"batch": 1,
"height": 640,
"width": 640,
"warmup": 10,
"runs": 50,
"mean_ms": 10.799106,
"p50_ms": 10.338849,
"p95_ms": 15.43869,
"p99_ms": 15.529218,
"fps_value": 92.600262,
"success": true,
"status": "success",
"model": {
"path": "/home/risenano01/InferEdgeForge/builds/yolov8n__jetson__tensorrt__jetson_fp16/model.engine",
"name": "model.engine"
},
"engine": {
"name": "tensorrt",
"backend": "tensorrt",
"available": true,
"status_message": "TensorRT backend is linked. Engine metadata, one-shot inference, and benchmark execution are available."
},
"device": {
"name": "jetson"
},
"precision": "fp16",
"run_config": {
"batch": 1,
"height": 640,
"width": 640,
"warmup": 10,
"runs": 50,
"power_mode": "15W",
"jetson_clocks": "unknown",
"tegrastats_log_path": "results/jetson_evidence/tegrastats_yolov8n_trt_fp16_15w_20260504T171959Z.log",
"manifest_path": "/home/risenano01/InferEdgeForge/builds/yolov8n__jetson__tensorrt__jetson_fp16/metadata.json",
"manifest_applied": true
},
"latency_ms": {
"mean": 10.799106,
"min": 8.930218,
"max": 15.529218,
"std": 1.789919,
"p50": 10.338849,
"p90": 12.319637,
"p95": 15.43869,
"p99": 15.529218
},
"fps": 92.600262,
"benchmark": {
"success": true,
"status": "success",
"message": "benchmark completed"
},
"timestamp": "2026-05-04T17:20:00Z",
"system": {
"os": "linux",
"compiler": "GCC",
"cpp_standard": "17",
"jetson": {
"power_mode": "15W",
"jetson_clocks": "unknown",
"tegrastats_log_path": "results/jetson_evidence/tegrastats_yolov8n_trt_fp16_15w_20260504T171959Z.log"
}
},
"jetson_evidence": {
"power_mode": "15W",
"jetson_clocks": "unknown",
"tegrastats_log_path": "results/jetson_evidence/tegrastats_yolov8n_trt_fp16_15w_20260504T171959Z.log",
"tegrastats_summary": {
"status": "parsed",
"sample_count": 10,
"ram_used_mb_avg": 1066.0,
"ram_used_mb_max": 1090.0,
"ram_total_mb": 7620.0,
"max_temp_c": 42.437,
"max_temp_name": "gpu",
"vdd_in_mw_avg": 5707.8,
"vdd_in_mw_max": 7120.0
}
},
"model_metadata": {
"inputs": [
{
"name": "images",
"element_type": "float32",
"shape": [
1,
3,
640,
640
]
}
],
"outputs": [
{
"name": "output0",
"element_type": "float32",
"shape": [
1,
84,
8400
]
}
]
},
"extra": {
"runtime": "inferedge-runtime",
"json_export": "enabled",
"output_mode": "explicit",
"latest_path": "results/latest.json",
"manifest_recorded": true,
"manifest_precision": "fp16",
"manifest_format": "engine",
"manifest_preset_name": "tensorrt/jetson_fp16",
"manifest_build_id": "yolov8n-tensorrt-jetson_fp16-20260424T133518Z",
"source_model_path": "models/onnx/yolov8n.onnx",
"source_model_sha256": "4b31ebf8213f2971b8136f7ccca475e27f40559a14bc27e0d8a531a933273eb7",
"runtime_artifact_sha256": "29484d824f5be2dfd3e1e801e927298f15f8e77af785711ac6fd429a7445ea22",
"runtime_artifact_path": "/home/risenano01/InferEdgeForge/builds/yolov8n__jetson__tensorrt__jetson_fp16/model.engine",
"input_mode": "dummy",
"input_path": "",
"input_preprocess": "dummy_zero_float32",
"power_mode": "15W",
"jetson_clocks": "unknown",
"tegrastats_log_path": "results/jetson_evidence/tegrastats_yolov8n_trt_fp16_15w_20260504T171959Z.log",
"tegrastats_status": "parsed",
"compare_ready": true,
"compare_key": "yolov8n__b1__h640w640__fp16",
"backend_key": "tensorrt__jetson",
"compare_model_source": "manifest_source_model",
"compare_model_name": "yolov8n"
}
}
25 changes: 25 additions & 0 deletions inferedgelab/studio/routes.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@
)
LATENCY_REGRESSION_SUMMARY = "latency_regression_summary.json"
AIGUARD_PORTFOLIO_CASES = "aiguard_portfolio_cases.json"
JETSON_POWER_MODE_SUMMARY = "jetson_power_mode_summary.json"
DEMO_JOB_ID = "demo_yolov8n_trt_vs_onnx"
STATIC_ASSETS = {
"app.js": "application/javascript",
Expand Down Expand Up @@ -175,6 +176,7 @@ def studio_demo_evidence(request: Request) -> dict[str, Any]:
problem_cases = _load_demo_problem_cases()
guard_demo_cases = _load_aiguard_portfolio_cases()
jetson_evidence_track = _build_jetson_evidence_track(results)
jetson_power_mode_summary = _load_jetson_power_mode_summary()
imported_results = _get_imported_results(request)
imported_results.extend(results)
guard_analysis = _build_demo_guard_analysis(results, evaluation_report)
Expand All @@ -191,6 +193,7 @@ def studio_demo_evidence(request: Request) -> dict[str, Any]:
problem_cases,
guard_demo_cases,
jetson_evidence_track,
jetson_power_mode_summary,
)
_get_demo_jobs(request)[DEMO_JOB_ID] = demo_job
return {
Expand All @@ -206,6 +209,7 @@ def studio_demo_evidence(request: Request) -> dict[str, Any]:
"problem_cases": problem_cases,
"guard_demo_cases": guard_demo_cases,
"jetson_evidence_track": jetson_evidence_track,
"jetson_power_mode_summary": jetson_power_mode_summary,
"guard_analysis": guard_analysis,
"deployment_decision": compare["deployment_decision"],
}
Expand Down Expand Up @@ -431,6 +435,25 @@ def _load_aiguard_portfolio_cases() -> dict[str, Any]:
}


def _load_jetson_power_mode_summary() -> dict[str, Any]:
    """Load the demo Jetson power-mode comparison summary and tag its source.

    Reads ``jetson_power_mode_summary.json`` from the demo evidence directory,
    verifies the minimal schema (``metrics`` and ``deployment_signal`` must be
    mappings), and returns a copy of the payload with a ``source`` key added.

    Raises:
        HTTPException: 500 when the file is missing/unreadable, is not valid
            JSON, or fails the schema check.
    """
    summary_path = DEMO_EVIDENCE_DIR / JETSON_POWER_MODE_SUMMARY

    # Read and parse in two steps so each failure mode maps to its own error.
    try:
        raw_text = summary_path.read_text(encoding="utf-8")
    except OSError as exc:
        raise HTTPException(status_code=500, detail=f"Jetson power-mode summary not found: {JETSON_POWER_MODE_SUMMARY}") from exc
    try:
        summary = json.loads(raw_text)
    except json.JSONDecodeError as exc:
        raise HTTPException(status_code=500, detail=f"Jetson power-mode summary is invalid JSON: {JETSON_POWER_MODE_SUMMARY}") from exc

    # Minimal schema gate: both sections must be present as objects.
    if isinstance(summary, dict):
        metrics = summary.get("metrics")
        deployment_signal = summary.get("deployment_signal")
    else:
        metrics = None
        deployment_signal = None
    if not (isinstance(metrics, dict) and isinstance(deployment_signal, dict)):
        raise HTTPException(status_code=500, detail=f"Jetson power-mode summary schema error: {JETSON_POWER_MODE_SUMMARY}")

    # Shallow-copy before annotating so the parsed payload is not mutated.
    return {**summary, "source": f"examples/studio_demo/{JETSON_POWER_MODE_SUMMARY}"}


def _load_problem_report(file_name: str) -> dict[str, Any]:
path = VALIDATION_PROBLEM_DIR / file_name
try:
Expand Down Expand Up @@ -465,6 +488,7 @@ def _build_demo_job(
problem_cases: list[dict[str, Any]],
guard_demo_cases: dict[str, Any],
jetson_evidence_track: dict[str, Any],
jetson_power_mode_summary: dict[str, Any],
) -> dict[str, Any]:
now = _utc_now_iso()
runtime_result = results[-1] if results else {}
Expand All @@ -488,6 +512,7 @@ def _build_demo_job(
"problem_cases": problem_cases,
"guard_demo_cases": guard_demo_cases,
"jetson_evidence_track": jetson_evidence_track,
"jetson_power_mode_summary": jetson_power_mode_summary,
"summary": compare["judgement"]["summary"],
},
"error": None,
Expand Down
13 changes: 13 additions & 0 deletions inferedgelab/studio/static/app.js
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ let demoProblemCases = [];
let activeGuardAnalysis = null;
let guardDemoCases = null;
let demoJetsonEvidence = null;
let demoPowerModeSummary = null;
const importedResultsByJobId = {};

function createElement(tagName, className, textContent) {
Expand Down Expand Up @@ -377,6 +378,7 @@ async function loadDemoEvidence() {
demoProblemCases = Array.isArray(payload.problem_cases) ? payload.problem_cases : [];
guardDemoCases = payload.guard_demo_cases || null;
demoJetsonEvidence = payload.jetson_evidence_track || null;
demoPowerModeSummary = payload.jetson_power_mode_summary || null;
compareData = payload.compare || null;
updateGuardEvidence(payload.guard_analysis || payload.compare?.guard_analysis || null);
selectedJobId = payload.job_id || payload.job?.job_id || selectedJobId;
Expand Down Expand Up @@ -512,6 +514,17 @@ function renderDemoEvaluation(report) {
evidenceItem("tegrastats", demoJetsonEvidence.tegrastats_status || "-"),
);
}

if (demoPowerModeSummary) {
const meanDelta = demoPowerModeSummary.metrics?.mean_ms?.delta_pct;
const fpsDelta = demoPowerModeSummary.metrics?.fps_value?.delta_pct;
target.append(
evidenceItem("power_compare", `${demoPowerModeSummary.baseline_power_mode || "25W"} vs ${demoPowerModeSummary.candidate_power_mode || "15W"}`),
evidenceItem("mean_delta", formatPercent(meanDelta)),
evidenceItem("fps_delta", formatPercent(fpsDelta)),
evidenceItem("power_note", demoPowerModeSummary.run_config_status || "-"),
);
}
}

function renderPipeline() {
Expand Down
7 changes: 7 additions & 0 deletions tests/test_studio_routes.py
Original file line number Diff line number Diff line change
Expand Up @@ -133,6 +133,7 @@ def test_studio_static_assets_include_redesigned_ui_contracts():
assert "loadDemoEvidence" in app_text
assert "renderDemoEvaluation" in app_text
assert "demoJetsonEvidence" in app_text
assert "demoPowerModeSummary" in app_text
assert "renderDemoProblemCases" in app_text
assert "renderGuardEvidence" in app_text
assert "renderGuardDemoCases" in app_text
Expand Down Expand Up @@ -373,6 +374,11 @@ def test_studio_demo_evidence_loads_compare_ready_pair():
assert response["jetson_evidence_track"]["runtime_result_source"] == (
"examples/studio_demo/tensorrt_jetson_25w_result.json"
)
assert response["jetson_power_mode_summary"]["baseline_power_mode"] == "25W"
assert response["jetson_power_mode_summary"]["candidate_power_mode"] == "15W"
assert response["jetson_power_mode_summary"]["metrics"]["mean_ms"]["delta_pct"] == 7.278719
assert response["jetson_power_mode_summary"]["metrics"]["fps_value"]["delta_pct"] == -6.784866
assert response["jetson_power_mode_summary"]["deployment_signal"]["decision"] == "review_note"
assert response["compare"]["status"] == "ok"
assert response["compare"]["judgement"]["overall"] == "tradeoff_faster"
assert response["guard_analysis"]["guard_verdict"] == "review_required"
Expand Down Expand Up @@ -430,6 +436,7 @@ def test_studio_demo_evidence_is_listed_and_selectable_as_job():
assert detail["result"]["runtime_result"]["backend_key"] == "tensorrt__jetson"
assert detail["result"]["runtime_result"]["run_config"]["power_mode"] == "25W"
assert detail["result"]["jetson_evidence_track"]["power_mode"] == "25W"
assert detail["result"]["jetson_power_mode_summary"]["candidate_power_mode"] == "15W"
assert detail["result"]["guard_analysis"]["guard_verdict"] == "review_required"
assert detail["result"]["guard_demo_cases"]["case_count"] == 4
assert detail["result"]["comparison"]["base"]["backend_key"] == "onnxruntime__cpu"
Expand Down
Loading