From 97b072bf51fee33ae120aa305febc76cfae9a5e4 Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Tue, 17 Mar 2026 14:58:38 +0100 Subject: [PATCH 01/38] feat(dommyrock-analyzer-cli): initial support for agentic cli generation --- Cargo.toml | 2 +- analyzer-discovery.json | 2226 +++++++++++++++++++++++++++++++++++++++ src/api/executor.rs | 85 ++ src/api/mod.rs | 147 +++ src/api/schema.rs | 104 ++ src/client/mod.rs | 54 +- src/discovery.rs | 205 ++++ src/main.rs | 75 +- 8 files changed, 2895 insertions(+), 3 deletions(-) create mode 100644 analyzer-discovery.json create mode 100644 src/api/executor.rs create mode 100644 src/api/mod.rs create mode 100644 src/api/schema.rs create mode 100644 src/discovery.rs diff --git a/Cargo.toml b/Cargo.toml index bee6d43..1a838c8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,7 +15,7 @@ name = "analyzer" path = "src/main.rs" [dependencies] -clap = { version = "4", features = ["derive", "env", "color", "help", "usage", "error-context", "suggestions", "wrap_help"] } +clap = { version = "4", features = ["derive", "env", "color", "help", "usage", "error-context", "suggestions", "wrap_help", "string"] } reqwest = { version = "0.12", default-features = false, features = ["json", "multipart", "stream", "rustls-tls"] } serde = { version = "1", features = ["derive"] } serde_json = "1" diff --git a/analyzer-discovery.json b/analyzer-discovery.json new file mode 100644 index 0000000..5e4f996 --- /dev/null +++ b/analyzer-discovery.json @@ -0,0 +1,2226 @@ +{ + "kind": "discovery#restDescription", + "discoveryVersion": "v1", + "id": "analyzer-api-routes:0.5.0", + "name": "analyzer-api-routes", + "version": "0.5.0", + "title": "Analyzer API routes", + "description": "", + "protocol": "rest", + "rootUrl": "", + "servicePath": "", + "schemas": { + "AiResult": { + "id": "AiResult", + "properties": { + "reasoning": { + "description": "AI reasoning, which lead to current status", + "type": "string" + }, + "sources": { + "description": "List of 
documents used by AI to produce current status.", + "items": { + "$ref": "UserUploadedDocument" + }, + "type": "array" + }, + "status": { + "$ref": "AiStatus" + }, + "user-action": { + "$ref": "SuggestionResponse" + } + }, + "type": "object" + }, + "AiStatus": { + "description": "Represents the status of a requirement determined by ai", + "enum": [ + "passed", + "failed", + "unknown" + ], + "id": "AiStatus", + "type": "string" + }, + "AiSuggestionStatus": { + "description": "Status of the AI suggestions computation.", + "id": "AiSuggestionStatus", + "properties": { + "status": { + "$ref": "Status" + } + }, + "type": "object" + }, + "AnalysisFilter": { + "id": "AnalysisFilter", + "properties": { + "query-name": { + "$ref": "QueryName" + }, + "values": { + "description": "Avaliable filter values with their count.", + "items": { + "$ref": "FilterValue" + }, + "type": "array" + } + }, + "type": "object" + }, + "AnalysisFindings": { + "description": "Wrapper type similar to AnalysisResult, but it contains only `findings`\nportion of analysis.", + "id": "AnalysisFindings" + }, + "AnalysisId": { + "description": "A wrapper struct `AnalysisId` around a UUID.\n ID in the analysis table.", + "format": "uuid", + "id": "AnalysisId", + "type": "string" + }, + "AnalysisInfo": { + "description": "Helper struct to define if a analysis should be by default enabled", + "id": "AnalysisInfo", + "properties": { + "default": { + "type": "boolean" + }, + "type": { + "type": "string" + } + }, + "type": "object" + }, + "AnalysisOverview": { + "description": "Like [`ScanOverview`] but for single analysis.", + "id": "AnalysisOverview" + }, + "AnalysisQueryUnion": { + "description": "Union of all available query parameters for analyses.", + "id": "AnalysisQueryUnion" + }, + "AnalysisResultDTO": { + "description": "AnalysisResult but with count of all findings,\nbefore pagination was applied.", + "id": "AnalysisResultDTO", + "properties": { + "filters": { + "description": "Filters that can be 
used in this analysis.", + "type": "object" + }, + "findings": { + "$ref": "AnalysisFindings" + }, + "total-findings": { + "description": "Total count of findings _after_ filtering, but _before_ pagination.", + "format": "int64", + "type": "integer" + } + }, + "type": "object" + }, + "AnalysisScore": { + "description": "The score of an analysis,", + "id": "AnalysisScore", + "properties": { + "id": { + "$ref": "AnalysisId" + }, + "score": { + "$ref": "Score" + }, + "type": { + "$ref": "AnalysisType" + } + }, + "type": "object" + }, + "AnalysisState": { + "description": "A analysis that runs for one particular system image.", + "id": "AnalysisState", + "properties": { + "id": { + "$ref": "AnalysisId" + }, + "status": { + "$ref": "AnalysisStatus" + }, + "type": { + "$ref": "ScanType" + } + }, + "type": "object" + }, + "AnalysisStatus": { + "description": "Represents the current execution status of an analysis task.", + "enum": [ + "success", + "pending", + "in-progress", + "canceled", + "error" + ], + "id": "AnalysisStatus", + "type": "string" + }, + "AnalysisType": { + "description": "Type of the analysis", + "enum": [ + "info", + "kernel", + "cve", + "password-hash", + "hardening", + "malware", + "software-bom", + "crypto", + "capabilities", + "symbols", + "tasks", + "stack-overflow" + ], + "id": "AnalysisType", + "type": "string" + }, + "AnalyzerResult": { + "id": "AnalyzerResult", + "properties": { + "status": { + "$ref": "AnalyzerStatus" + } + }, + "type": "object" + }, + "AnalyzerStatus": { + "description": "Represents the status of a requirement determined by analyzer", + "enum": [ + "passed", + "failed", + "unknown", + "not-applicable" + ], + "id": "AnalyzerStatus", + "type": "string" + }, + "ApiScanType": { + "description": "List of available analysis types per image type.\n\nThis includes the information if a analysis type should be scheduled by default or not.\n\n# Note\n\nThis is used by the frontend to determine which analysis has to be scheduled 
implicitly\nand which types are optional.", + "id": "ApiScanType" + }, + "BindFilter": { + "enum": [ + "local", + "global", + "weak" + ], + "id": "BindFilter", + "type": "string" + }, + "CapabilitiesOverview": { + "description": "Overview for Capability analysis.", + "id": "CapabilitiesOverview", + "properties": { + "capabilities": { + "description": "Capability found and their number of occurrences.", + "type": "object" + }, + "counts": { + "$ref": "RiskLevelCount" + }, + "executable_count": { + "description": "Total number executables.", + "format": "int64", + "type": "integer" + } + }, + "type": "object" + }, + "CapabilityParams": { + "id": "CapabilityParams", + "properties": { + "search": { + "type": "string" + }, + "severity-filter": { + "items": { + "$ref": "SeverityFilter" + }, + "type": "array" + }, + "sort-by": { + "$ref": "CapabilitySortBy" + }, + "sort-ord": { + "$ref": "SortOrd" + } + }, + "type": "object" + }, + "CapabilitySortBy": { + "enum": [ + "severity" + ], + "id": "CapabilitySortBy", + "type": "string" + }, + "Checks": { + "description": "Represents the checks performed in the report", + "id": "Checks", + "properties": { + "failed": { + "description": "Number of checks that failed (determined either by analyzer or overwritten by the user)", + "format": "int32", + "type": "integer" + }, + "not-applicable": { + "description": "Number of not applicable requirements", + "format": "int32", + "type": "integer" + }, + "passed": { + "description": "Number of checks that passed (determined either by analyzer or overwritten by the user)", + "format": "int32", + "type": "integer" + }, + "suggestion-available": { + "description": "Number of checks for which AI suggestion is available.\n\nIt does not include user accepted or rejected suggestions.", + "format": "int32", + "type": "integer" + }, + "total": { + "description": "Total number of checks performed", + "format": "int32", + "type": "integer" + }, + "unknown": { + "description": "Number of checks that 
analyzer was unable to determine\n(or ai didn't give conclusive suggestion).\n\nNote that this will also include those requirements,\nthat have ai suggestion available, but user has not approved or rejected it yet.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "ComponentType": { + "enum": [ + "application", + "framework", + "library", + "container", + "operating-system", + "device", + "firmware", + "file" + ], + "id": "ComponentType", + "type": "string" + }, + "CreateObject": { + "description": "The request to create a new object.", + "id": "CreateObject", + "properties": { + "description": { + "description": "Description of the object.", + "type": "string" + }, + "name": { + "description": "Name of the object.", + "type": "string" + }, + "tags": { + "description": "Tags associated with the object.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "CryptoOverview": { + "description": "Overview for Crypto analysis.", + "id": "CryptoOverview", + "properties": { + "certificates": { + "description": "Number of certificates found.", + "format": "int64", + "type": "integer" + }, + "private_keys": { + "description": "Number of private keys found.", + "format": "int64", + "type": "integer" + }, + "public_keys": { + "description": "Number of public keys found.", + "format": "int64", + "type": "integer" + } + }, + "type": "object" + }, + "CryptoParams": { + "id": "CryptoParams", + "properties": { + "search": { + "type": "string" + }, + "sort-by": { + "$ref": "CryptoSortBy" + }, + "sort-ord": { + "$ref": "SortOrd" + }, + "type-filter": { + "items": { + "$ref": "CryptoTypeFilter" + }, + "type": "array" + } + }, + "type": "object" + }, + "CryptoSortBy": { + "enum": [ + "type", + "key-size", + "filename", + "path", + "issuer" + ], + "id": "CryptoSortBy", + "type": "string" + }, + "CryptoTypeFilter": { + "enum": [ + "certificate", + "private-key", + "public-key" + ], + "id": "CryptoTypeFilter", + "type": 
"string" + }, + "CveOverview": { + "description": "Overview for Cve analysis.", + "id": "CveOverview", + "properties": { + "counts": { + "$ref": "CveSeverityCount" + }, + "products": { + "description": "Cve counts for each \"product\" (binary, library, etc.).", + "type": "object" + }, + "total": { + "description": "Sum of all `counts`.", + "format": "int64", + "type": "integer" + } + }, + "type": "object" + }, + "CveParams": { + "id": "CveParams", + "properties": { + "patch-filter": { + "items": { + "$ref": "CvePatchFilter" + }, + "type": "array" + }, + "search": { + "type": "string" + }, + "severity-filter": { + "items": { + "$ref": "CveSeverityFilter" + }, + "type": "array" + }, + "sort-by": { + "$ref": "CveSortBy" + }, + "sort-ord": { + "$ref": "SortOrd" + } + }, + "type": "object" + }, + "CvePatchFilter": { + "enum": [ + "available", + "unavailable" + ], + "id": "CvePatchFilter", + "type": "string" + }, + "CveSeverityCount": { + "description": "Maps CVE severity to its count", + "id": "CveSeverityCount", + "properties": { + "critical": { + "format": "int64", + "type": "integer" + }, + "high": { + "format": "int64", + "type": "integer" + }, + "low": { + "format": "int64", + "type": "integer" + }, + "medium": { + "format": "int64", + "type": "integer" + }, + "unknown": { + "format": "int64", + "type": "integer" + } + }, + "type": "object" + }, + "CveSeverityFilter": { + "enum": [ + "low", + "medium", + "high", + "critical" + ], + "id": "CveSeverityFilter", + "type": "string" + }, + "CveSortBy": { + "enum": [ + "severity" + ], + "id": "CveSortBy", + "type": "string" + }, + "CyberResilienceActReport": { + "description": "Represents a Cyber Resilience Act report", + "id": "CyberResilienceActReport", + "properties": { + "checks": { + "$ref": "Checks" + }, + "created-at": { + "description": "Date and time when the report was created.", + "format": "date-time", + "type": "string" + }, + "name": { + "description": "Name of the report.", + "type": "string" + }, + 
"sections": { + "description": "List of categories in the report.", + "items": { + "$ref": "Section" + }, + "type": "array" + }, + "updated-at": { + "description": "Date and time of last report update.\n\nIf no update has happened yet, for example after report was generated\nand before any user overwrite, this will be `null`.", + "format": "date-time", + "type": "string" + } + }, + "type": "object" + }, + "DockerAnalysis": { + "description": "Represents different types of analyses for Docker containers.", + "enum": [ + "info", + "cve", + "password-hash", + "crypto", + "software-bom", + "malware", + "hardening", + "capabilities" + ], + "id": "DockerAnalysis", + "type": "string" + }, + "DockerInfo": { + "description": "Container metadata information\n\nRepresents various metadata attributes of a container image", + "id": "DockerInfo", + "properties": { + "arch": { + "description": "List of supported CPU architectures for the container", + "items": { + "type": "string" + }, + "type": "array" + }, + "ctime": { + "description": "List of creation timestamps for container layers", + "items": { + "type": "string" + }, + "type": "array" + }, + "env": { + "description": "List of environment variables defined in the container", + "items": { + "type": "string" + }, + "type": "array" + }, + "history": { + "description": "List of commands used to build the container layers", + "items": { + "$ref": "History" + }, + "type": "array" + }, + "os": { + "description": "List of supported operating systems for the container", + "items": { + "type": "string" + }, + "type": "array" + }, + "os_name": { + "description": "Name of the base operating system used in the container", + "type": "string" + }, + "os_version": { + "description": "Version of the base operating system used in the container", + "type": "string" + }, + "tags": { + "description": "List of container image tags associated with the image", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, 
+ "DockerInfoResult": { + "description": "Info result for docker image", + "id": "DockerInfoResult" + }, + "DocumentListItem": { + "description": "A single document entry in a listing.", + "id": "DocumentListItem", + "properties": { + "file-name": { + "description": "Original file name, serves as the unique key within a scan's document storage", + "type": "string" + } + }, + "type": "object" + }, + "DocumentListResponse": { + "description": "A list of documents associated with a scan.", + "id": "DocumentListResponse", + "properties": { + "documents": { + "items": { + "$ref": "DocumentListItem" + }, + "type": "array" + } + }, + "type": "object" + }, + "DocumentUploadResponse": { + "description": "The response after successfully uploading a document.", + "id": "DocumentUploadResponse", + "properties": { + "file-name": { + "description": "Original file name, serves as the unique key within a scan's document storage", + "type": "string" + } + }, + "type": "object" + }, + "FeaturesFilter": { + "enum": [ + "seccomp", + "seccomp-filter", + "security-network", + "stack-protector", + "fortify-source", + "vmap-kernel-stack", + "usercopy", + "heap-freelist-obfuscation", + "executable-memory-protection", + "kaslr", + "apparmor", + "selinux", + "smack", + "tomoyo", + "yama" + ], + "id": "FeaturesFilter", + "type": "string" + }, + "FilterValue": { + "id": "FilterValue", + "properties": { + "count": { + "description": "Count of findings matching this value for current filter options.", + "format": "int64", + "type": "integer" + }, + "value": { + "description": "Filter value that can be passed in query paramters.", + "type": "string" + } + }, + "type": "object" + }, + "HardeningOverview": { + "description": "Overview for Hardening analysis.", + "id": "HardeningOverview", + "properties": { + "counts": { + "$ref": "HardeningSeverityCount" + }, + "total": { + "description": "Sum of all `counts`.", + "format": "int64", + "type": "integer" + } + }, + "type": "object" + }, + 
"HardeningParams": { + "id": "HardeningParams", + "properties": { + "search": { + "type": "string" + }, + "severity-filter": { + "items": { + "$ref": "HardeningSeverityFilter" + }, + "type": "array" + }, + "sort-by": { + "$ref": "HardeningSortBy" + }, + "sort-ord": { + "$ref": "SortOrd" + } + }, + "type": "object" + }, + "HardeningSeverityCount": { + "description": "Maps Hardening severity to its count", + "id": "HardeningSeverityCount", + "properties": { + "high": { + "format": "int64", + "type": "integer" + }, + "low": { + "format": "int64", + "type": "integer" + }, + "medium": { + "format": "int64", + "type": "integer" + } + }, + "type": "object" + }, + "HardeningSeverityFilter": { + "enum": [ + "low", + "medium", + "high" + ], + "id": "HardeningSeverityFilter", + "type": "string" + }, + "HardeningSortBy": { + "enum": [ + "severity", + "filename", + "canary", + "nx", + "pie", + "relro", + "fortify" + ], + "id": "HardeningSortBy", + "type": "string" + }, + "HealthStatus": { + "description": "Health status of an application.\n\nIt contains an overall `healthy` field but can also provide\nthe status of individual components or an error message.\nIf the status is not healthy a Http status code of 500 will be returned.", + "id": "HealthStatus", + "properties": { + "healthy": { + "type": "boolean" + }, + "message": { + "type": "string" + } + }, + "type": "object" + }, + "History": { + "id": "History", + "properties": { + "created": { + "format": "date-time", + "type": "string" + }, + "created_by": { + "type": "string" + }, + "empty_layer": { + "type": "boolean" + } + }, + "type": "object" + }, + "IdfAnalysis": { + "description": "Represents analyses specific to IDF (IoT Device Framework) targets.", + "enum": [ + "info", + "cve", + "software-bom", + "symbols", + "tasks", + "stack-overflow" + ], + "id": "IdfAnalysis", + "type": "string" + }, + "IdfInfo": { + "description": "IdfInfo analysis entry for idf image", + "id": "IdfInfo", + "properties": { + "arch": { + 
"description": "Architecture type", + "type": "string" + }, + "compiler": { + "description": "Compiler name and version used to create this image", + "type": "string" + }, + "freertos": { + "description": "freertos version", + "type": "string" + }, + "idf": { + "description": "idf version", + "type": "string" + } + }, + "type": "object" + }, + "IdfInfoResult": { + "description": "Info result for idf image", + "id": "IdfInfoResult" + }, + "IdfSymbolParams": { + "id": "IdfSymbolParams", + "properties": { + "bind-filter": { + "items": { + "$ref": "BindFilter" + }, + "type": "array" + }, + "search": { + "type": "string" + }, + "sort-by": { + "$ref": "IdfSymbolSortBy" + }, + "sort-ord": { + "$ref": "SortOrd" + }, + "type-filter": { + "items": { + "$ref": "TypeFilter" + }, + "type": "array" + } + }, + "type": "object" + }, + "IdfSymbolSortBy": { + "enum": [ + "name" + ], + "id": "IdfSymbolSortBy", + "type": "string" + }, + "IdfTaskParams": { + "id": "IdfTaskParams", + "properties": { + "search": { + "type": "string" + }, + "sort-by": { + "$ref": "IdfTaskSortBy" + }, + "sort-ord": { + "$ref": "SortOrd" + } + }, + "type": "object" + }, + "IdfTaskSortBy": { + "enum": [ + "function", + "name" + ], + "id": "IdfTaskSortBy", + "type": "string" + }, + "Image": { + "description": "A image on which a scan is executed", + "id": "Image", + "properties": { + "file_name": { + "description": "The original name of the file as provided when the image was uploaded.\nThis is typically used for display or reference purposes and may not be unique.", + "type": "string" + }, + "id": { + "$ref": "ImageId" + } + }, + "type": "object" + }, + "ImageId": { + "description": "A wrapper struct `ImageId` around a UUID.\n ID in the images table.", + "format": "uuid", + "id": "ImageId", + "type": "string" + }, + "ImageType": { + "description": "Type of the image used in scan", + "enum": [ + "linux", + "docker", + "idf" + ], + "id": "ImageType", + "type": "string" + }, + "Info": { + "id": "Info" + }, + 
"InfoOverview": { + "id": "InfoOverview" + }, + "KernelOverview": { + "description": "Overview for Kernel analysis.", + "id": "KernelOverview", + "properties": { + "count": { + "description": "Number of kernel security features enabled.", + "format": "int64", + "type": "integer" + } + }, + "type": "object" + }, + "KernelParams": { + "id": "KernelParams", + "properties": { + "features-filter": { + "items": { + "$ref": "FeaturesFilter" + }, + "type": "array" + }, + "sort-by": { + "$ref": "KernelSortBy" + }, + "sort-ord": { + "$ref": "SortOrd" + }, + "status-filter": { + "items": { + "$ref": "StatusFilter" + }, + "type": "array" + } + }, + "type": "object" + }, + "KernelSortBy": { + "enum": [ + "features", + "status" + ], + "id": "KernelSortBy", + "type": "string" + }, + "LinuxAnalysis": { + "description": "Represents different types of analyses that can be performed on a Linux system.", + "enum": [ + "info", + "kernel", + "cve", + "password-hash", + "crypto", + "software-bom", + "malware", + "hardening", + "capabilities" + ], + "id": "LinuxAnalysis", + "type": "string" + }, + "LinuxInfo": { + "description": "Represents the information about the system", + "id": "LinuxInfo", + "properties": { + "arch": { + "description": "The tags associated with the system", + "type": "string" + }, + "banner": { + "description": "The operating system name", + "type": "string" + }, + "kernel_version": { + "description": "The kernel version", + "type": "string" + }, + "libc": { + "description": "The operating system version", + "type": "string" + } + }, + "type": "object" + }, + "LinuxInfoResult": { + "description": "Info result for linux image", + "id": "LinuxInfoResult" + }, + "MalwareOverview": { + "description": "Overview for Malware analysis.", + "id": "MalwareOverview", + "properties": { + "count": { + "description": "Number of malware detected.", + "format": "int64", + "type": "integer" + } + }, + "type": "object" + }, + "MalwareParams": { + "id": "MalwareParams", + 
"properties": { + "sort-by": { + "$ref": "MalwareSortBy" + }, + "sort-ord": { + "$ref": "SortOrd" + } + }, + "type": "object" + }, + "MalwareSortBy": { + "enum": [ + "filename" + ], + "id": "MalwareSortBy", + "type": "string" + }, + "NewScanResponse": { + "description": "The response if a new scan is created.", + "id": "NewScanResponse", + "properties": { + "id": { + "$ref": "ScanId" + } + }, + "type": "object" + }, + "ObjectId": { + "description": "A wrapper struct `ObjectId` around a UUID.\n ID in the objects table.", + "format": "uuid", + "id": "ObjectId", + "type": "string" + }, + "PasswordHashOverview": { + "description": "Overview for Password Hash analysis.", + "id": "PasswordHashOverview", + "properties": { + "count": { + "description": "Number of passwords decoded.", + "format": "int64", + "type": "integer" + } + }, + "type": "object" + }, + "PasswordHashParams": { + "id": "PasswordHashParams", + "properties": { + "severity-filter": { + "items": { + "$ref": "PasswordHashSeverityFilter" + }, + "type": "array" + }, + "sort-by": { + "$ref": "PasswordHashSortBy" + }, + "sort-ord": { + "$ref": "SortOrd" + } + }, + "type": "object" + }, + "PasswordHashSeverityFilter": { + "enum": [ + "medium", + "high" + ], + "id": "PasswordHashSeverityFilter", + "type": "string" + }, + "PasswordHashSortBy": { + "enum": [ + "severity", + "username" + ], + "id": "PasswordHashSortBy", + "type": "string" + }, + "QueryName": { + "description": "Query parameter names for analysis filter types.\n\nNOTE: serialization values *MUST* match serialization structure\nof filter fields in QueryParameter types.", + "enum": [ + "license-filter" + ], + "id": "QueryName", + "type": "string" + }, + "Requirement": { + "description": "Represents a requirement in the report", + "id": "Requirement", + "properties": { + "advice": { + "description": "Human readable hint explaining how to pass this requirement.\n\nIn the case of \"with-suggestion\" status,\nthis will be the advice for the original 
status.", + "type": "string" + }, + "ai-suggestion": { + "$ref": "AiResult" + }, + "analyzer": { + "$ref": "AnalyzerResult" + }, + "description": { + "description": "Description of the requirement.", + "type": "string" + }, + "explanation": { + "description": "Human readable explanation of the status of this requirement.\n\nIn the case of \"with-suggestion\" status,\nthis will be the explanation for the original status.", + "type": "string" + }, + "id": { + "$ref": "RequirementId" + }, + "policy-ref": { + "description": "Reference to the policy associated with the requirement.", + "type": "string" + }, + "status": { + "$ref": "RequirementStatus" + }, + "user-overwrite": { + "$ref": "UserResult" + } + }, + "type": "object" + }, + "RequirementId": { + "description": "Id of Requirement\n\nThis id will be used to communicate between backend and fronted the semantic\nmeaning of requirement, as well as for overwriting specific requirement status by user.", + "enum": [ + "cve-exploits", + "password-strength", + "security-updates", + "update-notifications", + "access-control", + "unauthorized-access", + "data-encryption", + "data-integrity", + "data-collection", + "essential-availability", + "minimise-impact", + "attack-surfaces", + "attack-reduction", + "activity-monitoring", + "data-removal", + "vulns-documentation", + "vulns-security-updates", + "update-security-and-automation", + "security-testing-and-review", + "fixed-vulns-disclosure", + "vulns-coordinated-disclosure", + "vulns-reporting-contact", + "security-updates-dissemination" + ], + "id": "RequirementId", + "type": "string" + }, + "RequirementOverwrite": { + "description": "User action on a CRA requirement — either a manual overwrite or an AI suggestion response.", + "id": "RequirementOverwrite" + }, + "RequirementStatus": { + "description": "Overall status of the requirement\ncomputed by taking into account all user interactions.", + "enum": [ + "passed", + "failed", + "unknown", + "unknown-with-suggestion", + 
"not-applicable" + ], + "id": "RequirementStatus", + "type": "string" + }, + "RiskLevelCount": { + "description": "Count all different risk levels of the analysis.", + "id": "RiskLevelCount", + "properties": { + "critical": { + "format": "int64", + "type": "integer" + }, + "high": { + "format": "int64", + "type": "integer" + }, + "low": { + "format": "int64", + "type": "integer" + }, + "medium": { + "format": "int64", + "type": "integer" + }, + "none": { + "format": "int64", + "type": "integer" + }, + "unknown": { + "format": "int64", + "type": "integer" + } + }, + "type": "object" + }, + "SbomParams": { + "id": "SbomParams", + "properties": { + "license-filter": { + "items": { + "type": "string" + }, + "type": "array" + }, + "search": { + "type": "string" + }, + "sort-by": { + "$ref": "SbomSortBy" + }, + "sort-ord": { + "$ref": "SortOrd" + }, + "type-filter": { + "items": { + "$ref": "ComponentType" + }, + "type": "array" + } + }, + "type": "object" + }, + "SbomSortBy": { + "enum": [ + "name" + ], + "id": "SbomSortBy", + "type": "string" + }, + "Scan": { + "description": "Represents a scan that aggregates multiple analyses executed on a particular image.", + "id": "Scan", + "properties": { + "analysis": { + "description": "All analyses processed as part of this scan.", + "items": { + "$ref": "AnalysisState" + }, + "type": "array" + }, + "created": { + "description": "The date and time when the scan was initiated.", + "format": "date-time", + "type": "string" + }, + "id": { + "$ref": "ScanId" + }, + "image": { + "$ref": "Image" + }, + "image_type": { + "$ref": "ImageType" + }, + "info": { + "$ref": "Info" + }, + "score": { + "$ref": "ScanScore" + } + }, + "type": "object" + }, + "ScanId": { + "description": "A wrapper struct `ScanId` around a UUID.\n ID in the scans table.", + "format": "uuid", + "id": "ScanId", + "type": "string" + }, + "ScanOverview": { + "description": "Response object for `/scans/:id/overview` endpoint.\n\nSee [module's](super) documentation 
for more information\nabout schema and computation logic.", + "id": "ScanOverview", + "properties": { + "capabilities": { + "$ref": "CapabilitiesOverview" + }, + "crypto": { + "$ref": "CryptoOverview" + }, + "cve": { + "$ref": "CveOverview" + }, + "hardening": { + "$ref": "HardeningOverview" + }, + "info": { + "$ref": "InfoOverview" + }, + "kernel": { + "$ref": "KernelOverview" + }, + "malware": { + "$ref": "MalwareOverview" + }, + "password-hash": { + "$ref": "PasswordHashOverview" + }, + "software-bom": { + "$ref": "SoftwareBOMOverview" + }, + "stack-overflow": { + "$ref": "StackOverflowOverview" + }, + "symbols": { + "$ref": "SymbolsOverview" + }, + "tasks": { + "$ref": "TasksOverview" + } + }, + "type": "object" + }, + "ScanScore": { + "description": "The calculate score with an weighted algorithm over all analysis.", + "id": "ScanScore", + "properties": { + "score": { + "$ref": "Score" + }, + "scores": { + "description": "Individual analyses scores.", + "items": { + "$ref": "AnalysisScore" + }, + "type": "array" + } + }, + "type": "object" + }, + "ScanStatus": { + "description": "The status of a [`Scan`](analyzer_db::repository::scan::Scan)\nand all the [`Analysis`](analyzer_db::repository::analysis::Analysis).", + "id": "ScanStatus", + "properties": { + "id": { + "$ref": "ScanId" + }, + "status": { + "$ref": "AnalysisStatus" + } + }, + "type": "object" + }, + "ScanType": { + "description": "Represents a unified type for analyses across all supported images.", + "id": "ScanType" + }, + "Score": { + "description": "Represents a security impact score, ranging from 0 to 100.\n\nA higher value indicates a greater security impact.", + "format": "int32", + "id": "Score", + "type": "integer" + }, + "Section": { + "description": "Represents a group of requirements, grouped by [SubSection]s.", + "id": "Section", + "properties": { + "label": { + "description": "Name of the requirement", + "type": "string" + }, + "policy-ref": { + "description": "Reference to the policy 
associated with the requirement", + "type": "string" + }, + "sub-sections": { + "description": "List of sub-requirements or checks associated with this requirement", + "items": { + "$ref": "SubSection" + }, + "type": "array" + } + }, + "type": "object" + }, + "SeverityFilter": { + "enum": [ + "none", + "low", + "medium", + "high", + "critical", + "unknown" + ], + "id": "SeverityFilter", + "type": "string" + }, + "SoftwareBOMOverview": { + "description": "Overview for Software BOM analysis.", + "id": "SoftwareBOMOverview", + "properties": { + "count": { + "description": "Total number of software BOM entries.", + "format": "int64", + "type": "integer" + }, + "licenses": { + "description": "License type and their number of occurrences.", + "type": "object" + } + }, + "type": "object" + }, + "SortOrd": { + "enum": [ + "asc", + "desc" + ], + "id": "SortOrd", + "type": "string" + }, + "StackOverflowOverview": { + "description": "Overview for Stack Overflow analysis.", + "id": "StackOverflowOverview", + "properties": { + "method": { + "description": "Name of the protection method used,\nor `None` if stack overflow protection is not enabled.", + "type": "string" + } + }, + "type": "object" + }, + "Status": { + "description": "Status of the AI suggestions computation.", + "enum": [ + "in-progress", + "finished" + ], + "id": "Status", + "type": "string" + }, + "StatusFilter": { + "enum": [ + "enabled", + "disabled" + ], + "id": "StatusFilter", + "type": "string" + }, + "SubSection": { + "description": "Represents a group of requirements", + "id": "SubSection", + "properties": { + "label": { + "description": "Name of the requirement", + "type": "string" + }, + "requirements": { + "description": "List of sub-requirements or checks associated with this requirement", + "items": { + "$ref": "Requirement" + }, + "type": "array" + } + }, + "type": "object" + }, + "SuggestionResponse": { + "description": "User response to AI suggestion.", + "enum": [ + "accepted", + "rejected" + ], 
+ "id": "SuggestionResponse", + "type": "string" + }, + "SymbolsOverview": { + "description": "Overview for Symbol analysis.", + "id": "SymbolsOverview", + "properties": { + "count": { + "description": "Number of analyzed symbols.", + "format": "int64", + "type": "integer" + } + }, + "type": "object" + }, + "TasksOverview": { + "description": "Overview for Task analysis.", + "id": "TasksOverview", + "properties": { + "count": { + "description": "Number of analysed tasks.", + "format": "int64", + "type": "integer" + } + }, + "type": "object" + }, + "TypeFilter": { + "enum": [ + "sect", + "func", + "obj", + "file", + "notype" + ], + "id": "TypeFilter", + "type": "string" + }, + "UpdateObject": { + "description": "The request to update fields on an [`Object`].", + "id": "UpdateObject", + "properties": { + "description": { + "description": "Description of the object.", + "type": "string" + }, + "favorite": { + "description": "Sets if the object is a favorite or not.", + "type": "boolean" + }, + "name": { + "description": "Name of the object.", + "type": "string" + }, + "tags": { + "description": "The tags associated with the object.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "UserId": { + "description": "A wrapper struct `UserId` around a UUID.\n ID in the users table.", + "format": "uuid", + "id": "UserId", + "type": "string" + }, + "UserResult": { + "id": "UserResult", + "properties": { + "status": { + "$ref": "UserStatus" + } + }, + "type": "object" + }, + "UserStatus": { + "description": "Represents the status of a requirement overwritten by the user", + "enum": [ + "passed", + "failed" + ], + "id": "UserStatus", + "type": "string" + }, + "UserUploadedDocument": { + "description": "Description of the user provided file\nused by ai to give its suggestion.", + "id": "UserUploadedDocument", + "properties": { + "filename": { + "description": "Name of the user uploaded file.", + "type": "string" + } + }, + "type": 
"object" + } + }, + "resources": { + "api": { + "resources": { + "health": { + "methods": { + "list": { + "id": "analyzer-api-routes.api.health.list", + "httpMethod": "GET", + "path": "api/health", + "description": "Returns if the service is in an healthy state.", + "response": { + "$ref": "HealthStatus" + } + } + } + }, + "objects": { + "methods": { + "create": { + "id": "analyzer-api-routes.api.objects.create", + "httpMethod": "POST", + "path": "api/objects", + "description": "Create new object", + "request": { + "$ref": "CreateObject" + } + }, + "delete": { + "id": "analyzer-api-routes.api.objects.delete", + "httpMethod": "DELETE", + "path": "api/objects/{id}", + "description": "Deletes a object and all related scans.", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Unique identifier of the object to delete" + } + }, + "parameterOrder": [ + "id" + ] + }, + "get": { + "id": "analyzer-api-routes.api.objects.get", + "httpMethod": "GET", + "path": "api/objects/{id}", + "description": "Retrieve an object by its ID.", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Object ID" + } + }, + "parameterOrder": [ + "id" + ] + }, + "list": { + "id": "analyzer-api-routes.api.objects.list", + "httpMethod": "GET", + "path": "api/objects", + "description": "Retrieve a list of all objects of the current user.", + "parameters": { + "end_timestamp": { + "type": "string", + "required": false, + "location": "query", + "description": "End timestamp for pagination.", + "format": "datetime" + }, + "id": { + "type": "string", + "required": false, + "location": "query", + "description": "Pagination cursor (UUID).", + "format": "uuid" + }, + "limit": { + "type": "integer", + "required": false, + "location": "query", + "description": "Maximum number of items per page.", + "format": "int32" + }, + "start_timestamp": { + "type": "string", + "required": false, + "location": 
"query", + "description": "Start timestamp for pagination.", + "format": "datetime" + } + } + }, + "update": { + "id": "analyzer-api-routes.api.objects.update", + "httpMethod": "PUT", + "path": "api/objects/{id}", + "description": "Update an object", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Object ID" + } + }, + "parameterOrder": [ + "id" + ], + "request": { + "$ref": "UpdateObject" + } + } + }, + "resources": { + "scans": { + "methods": { + "list": { + "id": "analyzer-api-routes.api.objects.scans.list", + "httpMethod": "GET", + "path": "api/objects/{id}/scans", + "description": "Those scans could be", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Object ID" + } + }, + "parameterOrder": [ + "id" + ] + } + } + } + } + }, + "scans": { + "methods": { + "create": { + "id": "analyzer-api-routes.api.scans.create", + "httpMethod": "POST", + "path": "api/scans", + "description": "Schedule a new scan.", + "response": { + "$ref": "NewScanResponse" + } + }, + "delete": { + "id": "analyzer-api-routes.api.scans.delete", + "httpMethod": "DELETE", + "path": "api/scans/{id}", + "description": "Delete a scan.", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID" + } + }, + "parameterOrder": [ + "id" + ] + }, + "get": { + "id": "analyzer-api-routes.api.scans.get", + "httpMethod": "GET", + "path": "api/scans/{id}", + "description": "Returns a scan.", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID" + } + }, + "parameterOrder": [ + "id" + ], + "response": { + "$ref": "Scan" + } + }, + "list": { + "id": "analyzer-api-routes.api.scans.list", + "httpMethod": "GET", + "path": "api/scans", + "description": "Retrieve a list of scans." 
+ } + }, + "resources": { + "cancel": { + "methods": { + "create": { + "id": "analyzer-api-routes.api.scans.cancel.create", + "httpMethod": "POST", + "path": "api/scans/{id}/cancel", + "description": "This can be used to cancel an already pending or running scan.", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID" + } + }, + "parameterOrder": [ + "id" + ] + } + } + }, + "compliance-check": { + "resources": { + "cyber-resilience-act": { + "methods": { + "list": { + "id": "analyzer-api-routes.api.scans.compliance-check.cyber-resilience-act.list", + "httpMethod": "GET", + "path": "api/scans/{id}/compliance-check/cyber-resilience-act", + "description": "Computes compliance with Cyber Resilience Act", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID", + "format": "uuid" + } + }, + "parameterOrder": [ + "id" + ], + "response": { + "$ref": "CyberResilienceActReport" + } + } + }, + "resources": { + "ai-suggestion": { + "resources": { + "begin": { + "methods": { + "create": { + "id": "analyzer-api-routes.api.scans.compliance-check.cyber-resilience-act.ai-suggestion.begin.create", + "httpMethod": "POST", + "path": "api/scans/{id}/compliance-check/cyber-resilience-act/ai-suggestion/begin", + "description": "Triggers CRA AI suggestion using user-provided documents.", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID", + "format": "uuid" + } + }, + "parameterOrder": [ + "id" + ] + } + } + }, + "status": { + "methods": { + "list": { + "id": "analyzer-api-routes.api.scans.compliance-check.cyber-resilience-act.ai-suggestion.status.list", + "httpMethod": "GET", + "path": "api/scans/{id}/compliance-check/cyber-resilience-act/ai-suggestion/status", + "description": "Returns status of the CRA AI suggestion.", + "parameters": { + "id": { + "type": "string", + "required": true, + 
"location": "path", + "description": "Scan ID", + "format": "uuid" + } + }, + "parameterOrder": [ + "id" + ], + "response": { + "$ref": "AiSuggestionStatus" + } + } + } + } + } + }, + "overwrite": { + "methods": { + "overwrite_compliance_check_requirement": { + "id": "analyzer-api-routes.api.scans.compliance-check.cyber-resilience-act.overwrite.overwrite_compliance_check_requirement", + "httpMethod": "PUT", + "path": "api/scans/{id}/compliance-check/cyber-resilience-act/overwrite", + "description": "Overwrites compliance check requirement", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID", + "format": "uuid" + } + }, + "parameterOrder": [ + "id" + ], + "request": { + "$ref": "RequirementOverwrite" + } + } + } + }, + "report": { + "methods": { + "list": { + "id": "analyzer-api-routes.api.scans.compliance-check.cyber-resilience-act.report.list", + "httpMethod": "GET", + "path": "api/scans/{id}/compliance-check/cyber-resilience-act/report", + "description": "Downloads Cyber Resilience Act compliance report as PDF", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID", + "format": "uuid" + } + }, + "parameterOrder": [ + "id" + ] + } + } + } + } + } + } + }, + "documents": { + "methods": { + "create": { + "id": "analyzer-api-routes.api.scans.documents.create", + "httpMethod": "POST", + "path": "api/scans/{id}/documents", + "description": "Upload a document for a scan.", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID" + } + }, + "parameterOrder": [ + "id" + ], + "response": { + "$ref": "DocumentUploadResponse" + } + }, + "delete": { + "id": "analyzer-api-routes.api.scans.documents.delete", + "httpMethod": "DELETE", + "path": "api/scans/{id}/documents/{file_name}", + "description": "Delete a single document for a scan.", + "parameters": { + "file_name": { + "type": "string", 
+ "required": true, + "location": "path", + "description": "Document file name" + }, + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID" + } + }, + "parameterOrder": [ + "id", + "file_name" + ] + }, + "delete_documents": { + "id": "analyzer-api-routes.api.scans.documents.delete_documents", + "httpMethod": "DELETE", + "path": "api/scans/{id}/documents", + "description": "Delete all documents for a scan.", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID" + } + }, + "parameterOrder": [ + "id" + ] + }, + "list": { + "id": "analyzer-api-routes.api.scans.documents.list", + "httpMethod": "GET", + "path": "api/scans/{id}/documents", + "description": "List documents for a scan.", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID" + } + }, + "parameterOrder": [ + "id" + ], + "response": { + "$ref": "DocumentListResponse" + } + } + } + }, + "overview": { + "methods": { + "get": { + "id": "analyzer-api-routes.api.scans.overview.get", + "httpMethod": "GET", + "path": "api/scans/{scan_id}/overview/{analysis_id}", + "description": "Returns an overview of one analysis.", + "parameters": { + "analysis_id": { + "type": "string", + "required": true, + "location": "path", + "description": "Analysis ID" + }, + "scan_id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID" + } + }, + "parameterOrder": [ + "scan_id", + "analysis_id" + ], + "response": { + "$ref": "AnalysisOverview" + } + }, + "list": { + "id": "analyzer-api-routes.api.scans.overview.list", + "httpMethod": "GET", + "path": "api/scans/{id}/overview", + "description": "Returns an aggregated overview of all analysis executed for one scan.", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID" + } + }, + "parameterOrder": [ + "id" + ], + 
"response": { + "$ref": "ScanOverview" + } + } + } + }, + "report": { + "methods": { + "list": { + "id": "analyzer-api-routes.api.scans.report.list", + "httpMethod": "GET", + "path": "api/scans/{id}/report", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID", + "format": "uuid" + } + }, + "parameterOrder": [ + "id" + ] + } + } + }, + "results": { + "methods": { + "get": { + "id": "analyzer-api-routes.api.scans.results.get", + "httpMethod": "GET", + "path": "api/scans/{scan_id}/results/{analysis_id}", + "description": "Retrieve the results of one specific analysis of a scan.", + "parameters": { + "analysis_id": { + "type": "string", + "required": true, + "location": "path", + "description": "Analysis ID" + }, + "page": { + "type": "integer", + "required": false, + "location": "query", + "description": "Page number (must be > 0). If provided, `per-page` must also be provided.", + "format": "int32" + }, + "per-page": { + "type": "integer", + "required": false, + "location": "query", + "description": "Items per page (must be > 0). If provided, `page` must also be provided.", + "format": "int32" + }, + "query": { + "type": "string", + "required": true, + "location": "query", + "description": "Query parameters depend on the analysis type. Supported shapes: IDF task, other analysis types." 
+ }, + "scan_id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID" + } + }, + "parameterOrder": [ + "scan_id", + "analysis_id" + ], + "response": { + "$ref": "AnalysisResultDTO" + } + } + } + }, + "sbom": { + "methods": { + "list": { + "id": "analyzer-api-routes.api.scans.sbom.list", + "httpMethod": "GET", + "path": "api/scans/{id}/sbom", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID", + "format": "uuid" + } + }, + "parameterOrder": [ + "id" + ] + } + } + }, + "score": { + "methods": { + "list": { + "id": "analyzer-api-routes.api.scans.score.list", + "httpMethod": "GET", + "path": "api/scans/{id}/score", + "description": "Returns a security score of all successful finished analyses with their individual scores included.", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID" + } + }, + "parameterOrder": [ + "id" + ], + "response": { + "$ref": "ScanScore" + } + } + } + }, + "status": { + "methods": { + "list": { + "id": "analyzer-api-routes.api.scans.status.list", + "httpMethod": "GET", + "path": "api/scans/{id}/status", + "description": "Returns the status of a scan.", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID" + } + }, + "parameterOrder": [ + "id" + ], + "response": { + "$ref": "ScanStatus" + } + } + } + }, + "types": { + "methods": { + "list": { + "id": "analyzer-api-routes.api.scans.types.list", + "httpMethod": "GET", + "path": "api/scans/types", + "description": "Returns a list of all available analysis types for each different image." + } + } + } + } + } + } + } + } +} \ No newline at end of file diff --git a/src/api/executor.rs b/src/api/executor.rs new file mode 100644 index 0000000..5cdd0fc --- /dev/null +++ b/src/api/executor.rs @@ -0,0 +1,85 @@ +//! Generic API executor for discovery-driven methods. +//! +//! 
Substitutes path parameters, collects query parameters, and dispatches +//! HTTP requests through the existing [`AnalyzerClient`]. + +use anyhow::{Context, Result}; + +use crate::client::AnalyzerClient; +use crate::discovery::DiscoveryMethod; +use crate::output::Format; + +/// Execute a single discovery method against the API. +/// +/// `client` is `None` when `dry_run` is true (no auth required). +pub async fn execute_method( + client: Option<&AnalyzerClient>, + method: &DiscoveryMethod, + params_json: Option<&str>, + body_json: Option<&str>, + dry_run: bool, + _format: Format, +) -> Result<()> { + let params: serde_json::Map = match params_json { + Some(s) => serde_json::from_str(s).context("invalid --params JSON")?, + None => serde_json::Map::new(), + }; + + // Substitute path parameters: `api/scans/{id}/score` → `api/scans/abc-123/score` + let mut url_path = method.path.clone(); + for (name, param_def) in &method.parameters { + if param_def.location == "path" { + let value = params + .get(name) + .map(|v| match v { + serde_json::Value::String(s) => s.clone(), + other => other.to_string().trim_matches('"').to_string(), + }) + .or_else(|| param_def.default.clone()) + .with_context(|| format!("required path parameter '{name}' not provided"))?; + url_path = url_path.replace(&format!("{{{name}}}"), &value); + } + } + + // Collect query parameters + let mut query_params: Vec<(String, String)> = Vec::new(); + for (name, param_def) in &method.parameters { + if param_def.location == "query" { + if let Some(value) = params.get(name) { + let val_str = match value { + serde_json::Value::String(s) => s.clone(), + other => other.to_string(), + }; + query_params.push((name.clone(), val_str)); + } + } + } + + // Parse request body + let body: Option = match body_json { + Some(s) => Some(serde_json::from_str(s).context("invalid --json body")?), + None => None, + }; + + if dry_run { + let base = client.map(|c| c.base_url().as_str()).unwrap_or("/"); + println!("{} 
{base}{url_path}", method.http_method); + if !query_params.is_empty() { + for (k, v) in &query_params { + println!(" ?{k}={v}"); + } + } + if let Some(b) = &body { + println!("{}", serde_json::to_string_pretty(b)?); + } + return Ok(()); + } + + let client = client.context("API client required for non-dry-run execution")?; + let response = client + .execute_raw(&method.http_method, &url_path, &query_params, body.as_ref()) + .await?; + + println!("{}", serde_json::to_string_pretty(&response)?); + Ok(()) +} diff --git a/src/api/mod.rs b/src/api/mod.rs new file mode 100644 index 0000000..aab6901 --- /dev/null +++ b/src/api/mod.rs @@ -0,0 +1,147 @@ +//! Discovery-driven dynamic command tree. +//! +//! Builds a `clap::Command` tree at runtime from the Discovery Document's +//! resource hierarchy, then dispatches matched methods through the executor. + +pub mod executor; +pub mod generate_skills; +pub mod schema; + +use anyhow::{Context, Result}; + +use crate::config; +use crate::client::AnalyzerClient; +use crate::discovery::{self, DiscoveryDocument, DiscoveryResource}; +use crate::output::Format; + +/// Build the `api` clap command tree from the discovery document. +/// +/// Skips the top-level `"api"` resource wrapper since the CLI already +/// prefixes with `analyzer api ...`. 
+pub fn build_api_command(doc: &DiscoveryDocument) -> clap::Command { + let api_resource = doc + .resources + .get("api") + .expect("discovery document must have an 'api' resource"); + + let mut cmd = clap::Command::new("api") + .about("Discovery-driven API access (dynamically generated)") + .arg_required_else_help(true) + .arg( + clap::Arg::new("params") + .long("params") + .help("Path and query parameters as JSON") + .global(true), + ) + .arg( + clap::Arg::new("json") + .long("json") + .help("Request body as JSON (for POST/PUT/PATCH)") + .global(true), + ) + .arg( + clap::Arg::new("dry-run") + .long("dry-run") + .action(clap::ArgAction::SetTrue) + .help("Print the request without sending it") + .global(true), + ); + + cmd = add_resource_subcommands(cmd, api_resource); + cmd +} + +/// Recursively add subcommands from the resource tree. +/// Names and descriptions are cloned to owned `String`s because clap requires `'static`. +fn add_resource_subcommands( + mut parent: clap::Command, + resource: &DiscoveryResource, +) -> clap::Command { + for (method_name, method) in &resource.methods { + let about = method.description.clone().unwrap_or_default(); + let leaf = clap::Command::new(method_name.clone()).about(about); + parent = parent.subcommand(leaf); + } + + for (resource_name, child_resource) in &resource.resources { + let mut child_cmd = + clap::Command::new(resource_name.clone()).arg_required_else_help(true); + child_cmd = add_resource_subcommands(child_cmd, child_resource); + parent = parent.subcommand(child_cmd); + } + + parent +} + +/// Dispatch a matched `api` command to the executor. +/// +/// Client creation is deferred so that `--dry-run` works without auth. 
+pub async fn dispatch( + doc: &DiscoveryDocument, + matches: &clap::ArgMatches, + api_key: Option<&str>, + url: Option<&str>, + profile: Option<&str>, + format: Format, +) -> Result<()> { + let (path, leaf_matches) = extract_subcommand_path(matches); + + let api_resource = doc + .resources + .get("api") + .context("discovery document must have 'api' resource")?; + + let path_refs: Vec<&str> = path.iter().map(|s| s.as_str()).collect(); + let method = discovery::resolve_method(api_resource, &path_refs) + .with_context(|| format!("no method found at path: {}", path.join(".")))?; + + let params_json = get_global_arg(leaf_matches, matches, "params"); + let body_json = get_global_arg(leaf_matches, matches, "json"); + let dry_run = leaf_matches.get_flag("dry-run") || matches.get_flag("dry-run"); + + if dry_run { + return executor::execute_method( + None, + method, + params_json.as_deref(), + body_json.as_deref(), + true, + format, + ) + .await; + } + + let cfg = config::resolve(api_key, url, profile)?; + let client = AnalyzerClient::new(cfg.url, &cfg.api_key)?; + executor::execute_method( + Some(&client), + method, + params_json.as_deref(), + body_json.as_deref(), + false, + format, + ) + .await +} + +/// Extract the subcommand path by walking ArgMatches recursively. +fn extract_subcommand_path(matches: &clap::ArgMatches) -> (Vec, &clap::ArgMatches) { + let mut path = Vec::new(); + let mut current = matches; + while let Some((name, sub_matches)) = current.subcommand() { + path.push(name.to_string()); + current = sub_matches; + } + (path, current) +} + +/// Get a global arg that may be on the leaf or any parent matches. +fn get_global_arg( + leaf: &clap::ArgMatches, + parent: &clap::ArgMatches, + name: &str, +) -> Option { + leaf.get_one::(name) + .or_else(|| parent.get_one::(name)) + .cloned() +} diff --git a/src/api/schema.rs b/src/api/schema.rs new file mode 100644 index 0000000..68f52bb --- /dev/null +++ b/src/api/schema.rs @@ -0,0 +1,104 @@ +//! 
Schema introspection command. +//! +//! `analyzer schema api.scans.score.list` dumps the method signature +//! as machine-readable JSON (httpMethod, path, parameters, request/response refs). + +use anyhow::{Context, Result, bail}; +use serde::Serialize; + +use crate::discovery::{self, DiscoveryDocument, DiscoveryResource}; + +/// Handle `analyzer schema `. +pub fn handle_schema_command(doc: &DiscoveryDocument, dotted_path: &str) -> Result<()> { + let segments: Vec<&str> = dotted_path.split('.').collect(); + + if segments.is_empty() { + bail!("path cannot be empty"); + } + + // Path must start with "api" — we resolve from resources["api"] + if segments[0] != "api" { + bail!("path must start with 'api' (e.g. api.scans.score.list)"); + } + + let api_resource = doc + .resources + .get("api") + .context("discovery document has no 'api' resource")?; + + let rest = &segments[1..]; + + if rest.is_empty() { + print_resource_tree(api_resource, 0); + return Ok(()); + } + + // Try to resolve as a method first + if let Some(method) = discovery::resolve_method(api_resource, rest) { + let output = serde_json::to_string_pretty(&MethodSchema::from(method))?; + println!("{output}"); + return Ok(()); + } + + // Try to resolve as a resource and list its contents + if let Some(resource) = discovery::resolve_resource(api_resource, rest) { + print_resource_tree(resource, 0); + return Ok(()); + } + + bail!( + "path '{}' not found in discovery document", + segments.join(".") + ); +} + +/// Serializable view of a method for schema output. 
+#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct MethodSchema<'a> { + id: &'a str, + http_method: &'a str, + path: &'a str, + #[serde(skip_serializing_if = "Option::is_none")] + description: Option<&'a str>, + parameters: &'a std::collections::BTreeMap, + parameter_order: &'a [String], + #[serde(skip_serializing_if = "Option::is_none")] + request: Option<&'a crate::discovery::SchemaRef>, + #[serde(skip_serializing_if = "Option::is_none")] + response: Option<&'a crate::discovery::SchemaRef>, +} + +impl<'a> From<&'a crate::discovery::DiscoveryMethod> for MethodSchema<'a> { + fn from(m: &'a crate::discovery::DiscoveryMethod) -> Self { + Self { + id: &m.id, + http_method: &m.http_method, + path: &m.path, + description: m.description.as_deref(), + parameters: &m.parameters, + parameter_order: &m.parameter_order, + request: m.request.as_ref(), + response: m.response.as_ref(), + } + } +} + +/// Print the resource tree for navigation. +fn print_resource_tree(resource: &DiscoveryResource, indent: usize) { + let pad = " ".repeat(indent); + for (name, method) in &resource.methods { + let desc = method + .description + .as_deref() + .unwrap_or("") + .chars() + .take(60) + .collect::(); + println!("{pad}{name} ({}) — {desc}", method.http_method); + } + for (name, child) in &resource.resources { + println!("{pad}{name}/"); + print_resource_tree(child, indent + 1); + } +} diff --git a/src/client/mod.rs b/src/client/mod.rs index 9423ed4..6af3465 100644 --- a/src/client/mod.rs +++ b/src/client/mod.rs @@ -6,7 +6,7 @@ use std::path::Path; use std::pin::Pin; use std::task::{Context, Poll}; -use anyhow::{Result, bail}; +use anyhow::{Context as _, Result, bail}; use futures::Stream; use indicatif::{ProgressBar, ProgressState, ProgressStyle}; use reqwest::{Body, Client, header, multipart}; @@ -244,6 +244,58 @@ impl AnalyzerClient { Self::bytes(resp).await } + // -- Generic execution (discovery-driven commands) ------------------------- + + /// Execute a raw API request 
from discovery method metadata. + /// Returns the response as a JSON value, or `{"status": "ok"}` for empty bodies. + pub async fn execute_raw( + &self, + http_method: &str, + path: &str, + query_params: &[(String, String)], + body: Option<&serde_json::Value>, + ) -> Result { + let mut url = self.base_url.join(path)?; + for (key, val) in query_params { + url.query_pairs_mut().append_pair(key, val); + } + + let builder = match http_method.to_uppercase().as_str() { + "GET" => self.client.get(url), + "POST" => self.client.post(url), + "PUT" => self.client.put(url), + "DELETE" => self.client.delete(url), + "PATCH" => self.client.patch(url), + other => bail!("unsupported HTTP method: {other}"), + }; + + let builder = if let Some(body) = body { + builder.json(body) + } else { + builder + }; + + let resp = builder.send().await?; + let status = resp.status(); + + if status.is_success() { + let text = resp.text().await?; + if text.is_empty() { + Ok(serde_json::json!({"status": "ok"})) + } else { + serde_json::from_str(&text).context("response is not valid JSON") + } + } else { + let body = resp.text().await.unwrap_or_default(); + bail!("API error (HTTP {status}): {body}"); + } + } + + /// Expose the base URL for dry-run output. + pub fn base_url(&self) -> &Url { + &self.base_url + } + // -- Response helpers ----------------------------------------------------- async fn json(resp: reqwest::Response) -> Result { diff --git a/src/discovery.rs b/src/discovery.rs new file mode 100644 index 0000000..bd71620 --- /dev/null +++ b/src/discovery.rs @@ -0,0 +1,205 @@ +//! Discovery Document models and loader. +//! +//! Reads a Google Discovery-style JSON document (produced by `openapi-to-discovery`) +//! and provides lookup helpers for resolving methods from the nested resource tree. 
+ +use std::collections::BTreeMap; +use std::path::PathBuf; + +use anyhow::{Context, Result, bail}; +use serde::{Deserialize, Serialize}; + +// --------------------------------------------------------------------------- +// Serde models +// --------------------------------------------------------------------------- + +/// Top-level Discovery Document. +#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "camelCase")] +#[allow(dead_code)] +pub struct DiscoveryDocument { + pub name: String, + pub version: String, + pub title: String, + pub description: Option, + pub root_url: String, + pub service_path: String, + #[serde(default)] + pub schemas: BTreeMap, + #[serde(default)] + pub resources: BTreeMap, +} + +/// A recursive resource node containing methods and child resources. +#[derive(Debug, Clone, Default, Deserialize)] +pub struct DiscoveryResource { + #[serde(default)] + pub methods: BTreeMap, + #[serde(default)] + pub resources: BTreeMap, +} + +/// A single API method. +#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "camelCase")] +#[allow(dead_code)] +pub struct DiscoveryMethod { + pub id: String, + pub http_method: String, + pub path: String, + pub description: Option, + #[serde(default)] + pub parameters: BTreeMap, + #[serde(default)] + pub parameter_order: Vec, + pub request: Option, + pub response: Option, + #[serde(default)] + pub scopes: Vec, +} + +/// Parameter definition. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DiscoveryParameter { + #[serde(rename = "type")] + pub param_type: String, + pub required: bool, + pub location: String, + pub description: Option, + pub format: Option, + #[serde(rename = "enum")] + pub enum_values: Option>, + pub default: Option, +} + +/// Schema reference (e.g. `{"$ref": "ScanScore"}`). 
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SchemaRef { + #[serde(rename = "$ref")] + pub ref_name: String, +} + +// --------------------------------------------------------------------------- +// Loading +// --------------------------------------------------------------------------- + +/// Where the discovery document comes from. +pub enum DiscoverySource { + File(PathBuf), + Url(String), +} + +/// Determine the discovery source from the `--discovery` flag or env var. +pub fn resolve_source(flag: Option<&str>) -> Result { + let value = flag + .map(String::from) + .or_else(|| std::env::var("ANALYZER_DISCOVERY_URL").ok()); + + match value { + Some(v) if v.starts_with("http://") || v.starts_with("https://") => { + Ok(DiscoverySource::Url(v)) + } + Some(v) => Ok(DiscoverySource::File(PathBuf::from(v))), + None => bail!( + "no discovery document specified\n\n\ + Provide one with:\n \ + analyzer --discovery api ...\n \ + export ANALYZER_DISCOVERY_URL=" + ), + } +} + +/// Load and parse the discovery document from the resolved source. +pub async fn load(source: &DiscoverySource) -> Result { + let json_str = match source { + DiscoverySource::File(path) => std::fs::read_to_string(path) + .with_context(|| format!("failed to read discovery file: {}", path.display()))?, + DiscoverySource::Url(url) => reqwest::get(url) + .await + .with_context(|| format!("failed to fetch discovery document from {url}"))? + .text() + .await?, + }; + serde_json::from_str(&json_str).context("failed to parse discovery document") +} + +// --------------------------------------------------------------------------- +// Lookup helpers +// --------------------------------------------------------------------------- + +/// Resolve a method by walking the resource tree with a path like `["scans", "score", "list"]`. 
+pub fn resolve_method<'a>( + resource: &'a DiscoveryResource, + segments: &[&str], +) -> Option<&'a DiscoveryMethod> { + match segments { + [] => None, + [method_name] => resource.methods.get(*method_name), + [resource_name, rest @ ..] => resource + .resources + .get(*resource_name) + .and_then(|child| resolve_method(child, rest)), + } +} + +/// Resolve a resource by walking the tree (for schema introspection of intermediate nodes). +pub fn resolve_resource<'a>( + resource: &'a DiscoveryResource, + segments: &[&str], +) -> Option<&'a DiscoveryResource> { + match segments { + [] => Some(resource), + [name, rest @ ..] => resource + .resources + .get(*name) + .and_then(|child| resolve_resource(child, rest)), + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn loads_analyzer_discovery_json() { + let doc: DiscoveryDocument = + serde_json::from_str(&std::fs::read_to_string("analyzer-discovery.json").unwrap()) + .unwrap(); + assert_eq!(doc.name, "analyzer-api-routes"); + assert!(!doc.resources.is_empty()); + assert!(!doc.schemas.is_empty()); + } + + #[test] + fn resolve_method_finds_nested() { + let doc: DiscoveryDocument = + serde_json::from_str(&std::fs::read_to_string("analyzer-discovery.json").unwrap()) + .unwrap(); + let api = doc.resources.get("api").unwrap(); + let method = resolve_method(api, &["scans", "score", "list"]); + assert!(method.is_some()); + let m = method.unwrap(); + assert_eq!(m.http_method, "GET"); + assert!(m.path.contains("score")); + } + + #[test] + fn resolve_method_returns_none_for_bad_path() { + let doc: DiscoveryDocument = + serde_json::from_str(&std::fs::read_to_string("analyzer-discovery.json").unwrap()) + .unwrap(); + let api = doc.resources.get("api").unwrap(); + assert!(resolve_method(api, &["nonexistent", "method"]).is_none()); + } + + #[test] + fn resolve_resource_finds_intermediate() { + let doc: DiscoveryDocument = + serde_json::from_str(&std::fs::read_to_string("analyzer-discovery.json").unwrap()) + .unwrap(); + let 
api = doc.resources.get("api").unwrap(); + let scans = resolve_resource(api, &["scans"]); + assert!(scans.is_some()); + assert!(scans.unwrap().methods.contains_key("list")); + } +} diff --git a/src/main.rs b/src/main.rs index a3ea2d3..0a2b727 100644 --- a/src/main.rs +++ b/src/main.rs @@ -3,9 +3,11 @@ //! Scan firmware and container images for vulnerabilities, generate SBOMs, //! check CRA compliance, and more. +mod api; mod client; mod commands; mod config; +mod discovery; mod output; use std::path::PathBuf; @@ -50,6 +52,10 @@ struct Cli { #[arg(long, global = true, value_enum, default_value_t = Format::Human)] format: Format, + /// Path or URL to a discovery.json document for the `api` and `schema` subcommands. + #[arg(long, global = true, env = "ANALYZER_DISCOVERY_URL")] + discovery: Option, + #[command(subcommand)] command: Command, } @@ -87,6 +93,29 @@ enum Command { #[arg(value_enum)] shell: clap_complete::Shell, }, + + /// Discovery-driven API access — dynamically generated from a discovery document. + /// + /// Requires --discovery or ANALYZER_DISCOVERY_URL to be set. + Api { + /// Arguments passed to the dynamic command tree. + #[arg(trailing_var_arg = true, allow_hyphen_values = true, num_args = 0..)] + args: Vec, + }, + + /// Introspect method signatures from the discovery document. + /// + /// Requires --discovery or ANALYZER_DISCOVERY_URL to be set. + Schema { + /// Dotted path to introspect (e.g. "api.scans.score.list"). + path: String, + }, + + /// Generate skill files from the discovery document. + /// + /// Reads the discovery document and writes markdown skill files to `skills/`. + /// Requires --discovery or ANALYZER_DISCOVERY_URL to be set. 
+ GenerateSkills, } // -- Config subcommands ------------------------------------------------------- @@ -355,11 +384,12 @@ async fn main() -> ExitCode { } async fn run(cli: Cli) -> Result<()> { - // Extract auth fields before moving cli.command + // Extract fields before moving cli.command let api_key = cli.api_key; let url = cli.url; let profile = cli.profile; let format = cli.format; + let discovery_flag = cli.discovery; match cli.command { // -- Auth (no API key required) ----------------------------------- @@ -504,6 +534,49 @@ async fn run(cli: Cli) -> Result<()> { } } } + + // -- Discovery-driven commands (agent mode) ----------------------- + Command::Api { args } => { + let source = discovery::resolve_source(discovery_flag.as_deref())?; + let doc = discovery::load(&source).await?; + let api_cmd = api::build_api_command(&doc); + let api_matches = match api_cmd + .try_get_matches_from(std::iter::once("api".to_string()).chain(args)) + { + Ok(m) => m, + Err(e) => { + // Let clap handle --help and --version display directly + e.exit(); + } + }; + // Client creation is deferred — dispatch will call make_client only + // if the request isn't a dry-run. 
+ api::dispatch( + &doc, + &api_matches, + api_key.as_deref(), + url.as_deref(), + profile.as_deref(), + format, + ) + .await + } + + Command::Schema { path } => { + let source = discovery::resolve_source(discovery_flag.as_deref())?; + let doc = discovery::load(&source).await?; + api::schema::handle_schema_command(&doc, &path) + } + + Command::GenerateSkills => { + let source = discovery::resolve_source(discovery_flag.as_deref())?; + let doc = discovery::load(&source).await?; + let skills_dir = std::path::Path::new("skills"); + println!("Generating skills from discovery document..."); + api::generate_skills::generate(&doc, skills_dir)?; + println!("Done."); + Ok(()) + } } } From 5ae662ccca7b2ebfed2a45b7f07667d82ec12ff1 Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Tue, 17 Mar 2026 14:58:52 +0100 Subject: [PATCH 02/38] feat(dommyrock-analyzer-cli): context and skill generation --- .github/workflows/update-discovery.yml | 67 ++++ CLAUDE.md | 1 + CONTEXT.md | 186 ++++++++++ README.md | 72 ++++ gemini-extension.json | 6 + src/api/generate_skills.rs | 462 +++++++++++++++++++++++++ 6 files changed, 794 insertions(+) create mode 100644 .github/workflows/update-discovery.yml create mode 100644 CLAUDE.md create mode 100644 CONTEXT.md create mode 100644 gemini-extension.json create mode 100644 src/api/generate_skills.rs diff --git a/.github/workflows/update-discovery.yml b/.github/workflows/update-discovery.yml new file mode 100644 index 0000000..9c98caa --- /dev/null +++ b/.github/workflows/update-discovery.yml @@ -0,0 +1,67 @@ +name: Update Discovery Document + +on: + schedule: + - cron: '0 * * * *' # hourly + workflow_dispatch: {} # manual trigger + +permissions: + contents: write + pull-requests: write + +jobs: + update: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Fetch latest discovery.json + run: | + curl -sf https://analyzer.exein.dev/discovery.json \ + -H "Authorization: Bearer ${{ secrets.ANALYZER_API_KEY }}" \ + -o 
analyzer-discovery.json.new + + - name: Check for changes + id: diff + run: | + if diff -q analyzer-discovery.json analyzer-discovery.json.new > /dev/null 2>&1; then + echo "changed=false" >> "$GITHUB_OUTPUT" + rm analyzer-discovery.json.new + else + echo "changed=true" >> "$GITHUB_OUTPUT" + mv analyzer-discovery.json.new analyzer-discovery.json + fi + + - name: Install Rust toolchain + if: steps.diff.outputs.changed == 'true' + uses: dtolnay/rust-toolchain@stable + + - name: Cache cargo + if: steps.diff.outputs.changed == 'true' + uses: actions/cache@v4 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + target + key: ${{ runner.os }}-cargo-${{ hashFiles('Cargo.lock') }} + + - name: Regenerate skills + if: steps.diff.outputs.changed == 'true' + run: | + cargo build --release + ./target/release/analyzer --discovery ./analyzer-discovery.json generate-skills + + - name: Create PR + if: steps.diff.outputs.changed == 'true' + uses: peter-evans/create-pull-request@v6 + with: + title: "update: Discovery Document + skills" + body: | + Auto-generated from upstream API changes. + + The discovery document at `analyzer-discovery.json` has changed. + Skill files in `skills/` have been regenerated. + branch: update-discovery + commit-message: "update: discovery.json + regenerated skills" + delete-branch: true diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 0000000..e58f2d2 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1 @@ +When using or contributing to this repository, follow the guidelines in [CONTEXT.md](CONTEXT.md). diff --git a/CONTEXT.md b/CONTEXT.md new file mode 100644 index 0000000..59e7a9e --- /dev/null +++ b/CONTEXT.md @@ -0,0 +1,186 @@ +# Analyzer CLI (`analyzer`) Context + +The `analyzer` CLI provides dynamic access to a firmware and software image security analysis API by parsing a Discovery Document at runtime. 
It manages objects (firmware images), schedules security scans, retrieves vulnerability findings (CVE, hardening, crypto, SBOM, malware, kernel), checks compliance (Cyber Resilience Act), and generates reports — all driven from a single `discovery.json` with no hardcoded commands. + +## Rules of Engagement for Agents + +* **Schema Discovery:** *If you don't know the exact JSON payload structure, run `analyzer schema .` first to inspect the schema before executing.* +* **Context Window Protection:** *Scan results and overview responses can be large. ALWAYS use `--fields` when listing or getting resources to avoid overwhelming your context window.* +* **Dry-Run Safety:** *Always use the `--dry-run` flag for mutating operations (create, update, delete) to validate your JSON payload before actual execution.* +* **Poll, Don't Guess:** *After scheduling a scan, poll `analyzer api scans status list` until it completes. Do not assume timing or make further requests against incomplete scans.* +* **One Step at a Time:** *Verify each step succeeded (exit code 0, valid JSON response) before proceeding to the next.* + +## Core Syntax + +```bash +analyzer api [sub-resource...] [flags] +``` + +Use `--help` to get help on the available commands. + +```bash +analyzer --help +analyzer api --help +analyzer api objects --help +analyzer api scans --help +analyzer api scans overview --help +analyzer api scans compliance-check cyber-resilience-act --help +analyzer api scans compliance-check cyber-resilience-act ai-suggestion --help +``` + +### Key Flags + +- `--params ''`: Path and query parameters (e.g., `id`, `limit`, `page`, `per-page`, `query`). +- `--json ''`: Request body for POST/PUT/PATCH methods. Must match the schema exactly. +- `--fields ''`: Limits the response fields (critical for AI context window efficiency). +- `--dry-run`: Validates and prints the request without executing. Use before every mutation. +- `--output json|table`: Output format. Default: `json`. 
Agents should always use `json`. + +## Usage Patterns + +### 1. Reading Data (GET/LIST) + +Always use `--fields` to minimize tokens. + +```bash +# List objects with field mask +analyzer api objects list --params '{"limit": 10}' --fields "id,name,tags" + +# Get a single object +analyzer api objects get --params '{"id": "OBJ_ID"}' + +# Get scan details +analyzer api scans get --params '{"id": "SCAN_ID"}' + +# Check service health +analyzer api health list +``` + +### 2. Writing Data (POST/PUT/PATCH) + +Use `--json` for the request body. Always `--dry-run` first. + +```bash +# Create an object (firmware image) +analyzer api objects create --json '{"name": "Router FW v2.1", "description": "Edge router firmware", "tags": ["router", "v2.1"]}' --dry-run + +# Update an object +analyzer api objects update --params '{"id": "OBJ_ID"}' --json '{"name": "Router FW v2.1.1", "favorite": true}' --dry-run + +# Schedule a new scan +analyzer api scans create --json '{...}' --dry-run +``` + +### 3. Deleting Data + +Always `--dry-run` first. Deletions are irreversible. + +```bash +# Delete an object and all its scans +analyzer api objects delete --params '{"id": "OBJ_ID"}' --dry-run + +# Delete a scan +analyzer api scans delete --params '{"id": "SCAN_ID"}' --dry-run + +# Delete a single document +analyzer api scans documents delete --params '{"id": "SCAN_ID", "file_name": "datasheet.pdf"}' --dry-run + +# Delete all documents for a scan +analyzer api scans documents delete_documents --params '{"id": "SCAN_ID"}' --dry-run +``` + +### 4. Schema Introspection + +If unsure about parameters or body structure, check the schema: + +```bash +analyzer schema api.objects.create +analyzer schema api.scans.create +analyzer schema api.scans.results.get +analyzer schema api.scans.compliance-check.cyber-resilience-act.list +``` + +### 5. Scan Lifecycle (Poll-Based) + +Scans are asynchronous. Schedule, poll, then retrieve results. 
+ +```bash +# Schedule +analyzer api scans create --json '{...}' + +# Poll status until complete +analyzer api scans status list --params '{"id": "SCAN_ID"}' +# → {"id": "SCAN_ID", "status": "running"} +# → {"id": "SCAN_ID", "status": "finished"} + +# Cancel if needed +analyzer api scans cancel create --params '{"id": "SCAN_ID"}' +``` + +### 6. Scan Results and Scoring + +```bash +# Aggregated overview (CVE, hardening, crypto, SBOM, malware, kernel, etc.) +analyzer api scans overview list --params '{"id": "SCAN_ID"}' + +# Single analysis overview +analyzer api scans overview get --params '{"scan_id": "SCAN_ID", "analysis_id": "ANALYSIS_ID"}' + +# Security score +analyzer api scans score list --params '{"id": "SCAN_ID"}' + +# Detailed findings with pagination and filtering +analyzer api scans results get --params '{"scan_id": "SCAN_ID", "analysis_id": "cve", "page": 1, "per-page": 50}' + +# SBOM export +analyzer api scans sbom list --params '{"id": "SCAN_ID"}' + +# Full report download +analyzer api scans report list --params '{"id": "SCAN_ID"}' +``` + +### 7. Compliance — Cyber Resilience Act (CRA) + +```bash +# Get CRA compliance report +analyzer api scans compliance-check cyber-resilience-act list --params '{"id": "SCAN_ID"}' + +# Download CRA report as PDF +analyzer api scans compliance-check cyber-resilience-act report list --params '{"id": "SCAN_ID"}' + +# Overwrite a requirement (manual assessment) +analyzer api scans compliance-check cyber-resilience-act overwrite overwrite_compliance_check_requirement \ + --params '{"id": "SCAN_ID"}' --json '{...}' --dry-run + +# Trigger AI suggestion +analyzer api scans compliance-check cyber-resilience-act ai-suggestion begin create --params '{"id": "SCAN_ID"}' + +# Poll AI suggestion status +analyzer api scans compliance-check cyber-resilience-act ai-suggestion status list --params '{"id": "SCAN_ID"}' +``` + +### 8. 
Documents (Scan Attachments) + +```bash +# List documents for a scan +analyzer api scans documents list --params '{"id": "SCAN_ID"}' + +# Upload a document +analyzer api scans documents create --params '{"id": "SCAN_ID"}' + +# Delete a single document +analyzer api scans documents delete --params '{"id": "SCAN_ID", "file_name": "FILE"}' --dry-run + +# Delete all documents +analyzer api scans documents delete_documents --params '{"id": "SCAN_ID"}' --dry-run +``` + +## Error Handling + +All errors are JSON on stderr with a non-zero exit code: + +```json +{"error": {"code": 404, "message": "Object not found"}} +``` + +Check the exit code: `0` = success, non-zero = failure. Parse the error JSON to decide next steps. Do not retry without understanding the error. diff --git a/README.md b/README.md index 963e8ed..7b7892c 100644 --- a/README.md +++ b/README.md @@ -273,6 +273,78 @@ Settings are resolved in this order (highest priority first): | `docker` | info, cve, password-hash, crypto, software-bom, malware, hardening, capabilities | | `idf` | info, cve, software-bom, symbols, tasks, stack-overflow | +## Agent mode + +The CLI includes a discovery-driven layer designed for AI agents. While the commands above are human-friendly (named flags, progress bars, `--wait`), the agent layer exposes the full API surface dynamically from a [Discovery Document](https://developers.google.com/discovery/v1/reference/apis) — no hardcoded commands. + +Agents read [CONTEXT.md](CONTEXT.md) at session start for syntax, rules, and examples. 
+ +### Setup + +Point the CLI at a discovery document (local file or URL): + +```bash +# Via flag +analyzer --discovery ./analyzer-discovery.json api objects list + +# Via environment variable +export ANALYZER_DISCOVERY_URL=https://analyzer.exein.dev/discovery.json +analyzer api scans list +``` + +### `analyzer api` — dynamic API access + +All API resources and methods are generated at runtime from the discovery document: + +```bash +# List objects +analyzer api objects list --params '{"limit": 10}' + +# Create a scan (dry-run first) +analyzer api scans create --json '{"name": "test", ...}' --dry-run + +# Get scan results with pagination +analyzer api scans results get --params '{"scan_id": "ID", "analysis_id": "cve", "page": 1}' + +# Check CRA compliance +analyzer api scans compliance-check cyber-resilience-act list --params '{"id": "SCAN_ID"}' +``` + +**Flags for `api` commands:** + +| Flag | Purpose | +|------|---------| +| `--params ''` | Path and query parameters | +| `--json ''` | Request body for POST/PUT/PATCH | +| `--dry-run` | Print the request without executing | + +### `analyzer schema` — introspect method signatures + +Dump the full method signature (HTTP method, path, parameters, request/response schemas) as JSON: + +```bash +# Inspect a specific method +analyzer schema api.scans.create + +# Browse available methods under a resource +analyzer schema api.scans + +# Full tree +analyzer schema api +``` + +### `analyzer generate-skills` — generate skill files + +Reads the discovery document and writes markdown skill files to `skills/`: + +```bash +analyzer --discovery ./analyzer-discovery.json generate-skills +ls skills/ +# analyzer-objects/ analyzer-scans/ analyzer-shared/ +``` + +See [CONTEXT.md](CONTEXT.md) for the full agent reference. 
+ ## License Apache-2.0 diff --git a/gemini-extension.json b/gemini-extension.json new file mode 100644 index 0000000..7903cdf --- /dev/null +++ b/gemini-extension.json @@ -0,0 +1,6 @@ +{ + "name": "analyzer-cli", + "version": "0.2.0", + "description": "CLI for firmware and software image security analysis, with discovery-driven agent mode.", + "contextFileName": "CONTEXT.md" +} diff --git a/src/api/generate_skills.rs b/src/api/generate_skills.rs new file mode 100644 index 0000000..77cc9e3 --- /dev/null +++ b/src/api/generate_skills.rs @@ -0,0 +1,462 @@ +//! Skill file generator. +//! +//! `analyzer generate-skills` reads the discovery document and writes +//! markdown skill files to `skills/` — one per top-level API resource, +//! plus a shared skill for global flags, auth, and error handling. +//! +//! Follows the same pattern as the gws CLI's `generate-skills` command: +//! YAML frontmatter with `metadata.openclaw`, prerequisite notes, +//! "Discovering Commands" section, and a skills index. + +use std::fmt::Write as FmtWrite; +use std::path::Path; + +use anyhow::{Context, Result}; + +use crate::discovery::{DiscoveryDocument, DiscoveryMethod, DiscoveryResource}; + +struct SkillIndexEntry { + name: String, + description: String, +} + +/// Generate skill files from the discovery document. 
+/// +/// Writes: +/// - `skills/analyzer-/SKILL.md` for each top-level resource under `api` +/// - `skills/analyzer-shared/SKILL.md` for global flags, auth, and error handling +/// - `docs/skills.md` skills index +pub fn generate(doc: &DiscoveryDocument, output_dir: &Path) -> Result<()> { + let api_resource = doc + .resources + .get("api") + .context("discovery document must have an 'api' resource")?; + + std::fs::create_dir_all(output_dir) + .with_context(|| format!("failed to create output directory: {}", output_dir.display()))?; + + let mut index: Vec = Vec::new(); + + // Generate the shared skill first (others reference it) + let shared_dir = output_dir.join("analyzer-shared"); + std::fs::create_dir_all(&shared_dir)?; + let shared_content = render_shared_skill(&doc.version); + let shared_path = shared_dir.join("SKILL.md"); + std::fs::write(&shared_path, &shared_content) + .with_context(|| format!("failed to write {}", shared_path.display()))?; + println!(" wrote {}", shared_path.display()); + index.push(SkillIndexEntry { + name: "analyzer-shared".to_string(), + description: "Shared patterns for authentication, global flags, and error handling." 
+ .to_string(), + }); + + // Generate one skill per top-level resource + for (resource_name, resource) in &api_resource.resources { + let skill_name = format!("analyzer-{resource_name}"); + let skill_dir = output_dir.join(&skill_name); + std::fs::create_dir_all(&skill_dir)?; + + let content = render_resource_skill(resource_name, resource, &doc.version); + let skill_path = skill_dir.join("SKILL.md"); + std::fs::write(&skill_path, &content) + .with_context(|| format!("failed to write {}", skill_path.display()))?; + + println!(" wrote {}", skill_path.display()); + + let description = match resource_name.as_str() { + "objects" => "Manage firmware/software objects (CRUD operations).".to_string(), + "scans" => { + "Manage scans, results, compliance checks, reports, and documents.".to_string() + } + "health" => "Check API service health.".to_string(), + _ => format!("API operations for {resource_name}."), + }; + index.push(SkillIndexEntry { + name: skill_name, + description, + }); + } + + // Write skills index + write_skills_index(&index)?; + + Ok(()) +} + +/// Write `docs/skills.md` index file. +fn write_skills_index(entries: &[SkillIndexEntry]) -> Result<()> { + let mut out = String::new(); + writeln!(out, "# Skills Index").unwrap(); + writeln!(out).unwrap(); + writeln!( + out, + "> Auto-generated by `analyzer generate-skills`. Do not edit manually." 
+ ) + .unwrap(); + writeln!(out).unwrap(); + writeln!(out, "| Skill | Description |").unwrap(); + writeln!(out, "|-------|-------------|").unwrap(); + for entry in entries { + writeln!( + out, + "| [{}](../skills/{}/SKILL.md) | {} |", + entry.name, entry.name, entry.description + ) + .unwrap(); + } + writeln!(out).unwrap(); + + let docs_dir = Path::new("docs"); + std::fs::create_dir_all(docs_dir).context("failed to create docs directory")?; + let path = docs_dir.join("skills.md"); + std::fs::write(&path, &out) + .with_context(|| format!("failed to write {}", path.display()))?; + println!(" wrote {}", path.display()); + Ok(()) +} + +/// Render a SKILL.md for a single top-level resource (e.g. "objects", "scans"). +fn render_resource_skill(name: &str, resource: &DiscoveryResource, version: &str) -> String { + let mut out = String::new(); + + let description = match name { + "objects" => "Manage firmware/software objects (CRUD operations).", + "scans" => "Manage scans, results, compliance checks, reports, and documents.", + "health" => "Check API service health.", + _ => "API resource operations.", + }; + + // YAML frontmatter (matches gws pattern with metadata.openclaw) + writeln!(out, "---").unwrap(); + writeln!(out, "name: analyzer-{name}").unwrap(); + writeln!(out, "version: {version}").unwrap(); + writeln!(out, "description: \"{description}\"").unwrap(); + writeln!(out, "metadata:").unwrap(); + writeln!(out, " openclaw:").unwrap(); + writeln!(out, " category: \"security\"").unwrap(); + writeln!(out, " requires:").unwrap(); + writeln!(out, " bins: [\"analyzer\"]").unwrap(); + writeln!(out, " cliHelp: \"analyzer api {name} --help\"").unwrap(); + writeln!(out, "---").unwrap(); + writeln!(out).unwrap(); + + // Title + writeln!(out, "# analyzer {name}").unwrap(); + writeln!(out).unwrap(); + + // Prerequisite note (gws pattern) + writeln!( + out, + "> **PREREQUISITE:** Read `../analyzer-shared/SKILL.md` for auth, global flags, and error handling." 
+ ) + .unwrap(); + writeln!(out).unwrap(); + + // Syntax + writeln!(out, "```bash").unwrap(); + writeln!(out, "analyzer api {name} [flags]").unwrap(); + writeln!(out, "```").unwrap(); + writeln!(out).unwrap(); + + // API Methods section + writeln!(out, "## API Methods").unwrap(); + writeln!(out).unwrap(); + + // Direct methods on this resource + render_method_list(&mut out, name, &resource.methods); + + // Sub-resources + render_sub_resources(&mut out, name, resource); + + // Flags section + writeln!(out, "## Flags").unwrap(); + writeln!(out).unwrap(); + writeln!(out, "| Flag | Purpose |").unwrap(); + writeln!(out, "|------|---------|").unwrap(); + writeln!( + out, + "| `--params ''` | Path and query parameters (e.g., `id`, `limit`, `page`) |" + ) + .unwrap(); + writeln!( + out, + "| `--json ''` | Request body for POST/PUT/PATCH methods |" + ) + .unwrap(); + writeln!( + out, + "| `--dry-run` | Print the request without executing |" + ) + .unwrap(); + writeln!(out).unwrap(); + + // Discovering Commands section (gws pattern) + writeln!(out, "## Discovering Commands").unwrap(); + writeln!(out).unwrap(); + writeln!( + out, + "Before calling any API method, inspect it:" + ) + .unwrap(); + writeln!(out).unwrap(); + writeln!(out, "```bash").unwrap(); + writeln!(out, "# Browse resources and methods").unwrap(); + writeln!(out, "analyzer api {name} --help").unwrap(); + writeln!(out).unwrap(); + writeln!( + out, + "# Inspect a method's required params, types, and defaults" + ) + .unwrap(); + writeln!(out, "analyzer schema api.{name}.").unwrap(); + writeln!(out, "```").unwrap(); + writeln!(out).unwrap(); + writeln!( + out, + "Use `analyzer schema` output to build your `--params` and `--json` flags." 
+ ) + .unwrap(); + writeln!(out).unwrap(); + + // Examples section + writeln!(out, "## Examples").unwrap(); + writeln!(out).unwrap(); + render_examples(&mut out, name, resource); + + // See Also + writeln!(out, "## See Also").unwrap(); + writeln!(out).unwrap(); + writeln!( + out, + "- [analyzer-shared](../analyzer-shared/SKILL.md) — Global flags and auth" + ) + .unwrap(); + writeln!( + out, + "- [CONTEXT.md](../../CONTEXT.md) — Full agent reference" + ) + .unwrap(); + writeln!(out).unwrap(); + + out +} + +/// Render methods as a list with descriptions (gws pattern: `- method — description`). +fn render_method_list( + out: &mut String, + _path_prefix: &str, + methods: &std::collections::BTreeMap, +) { + if methods.is_empty() { + return; + } + + for (method_name, method) in methods { + let desc = method + .description + .as_deref() + .unwrap_or("") + .lines() + .next() + .unwrap_or(""); + writeln!(out, " - `{method_name}` ({}) — {desc}", method.http_method).unwrap(); + } + writeln!(out).unwrap(); +} + +/// Render sub-resources recursively. +fn render_sub_resources(out: &mut String, path_prefix: &str, resource: &DiscoveryResource) { + for (sub_name, sub_resource) in &resource.resources { + let sub_path = format!("{path_prefix}.{sub_name}"); + + writeln!(out, "### {sub_name}").unwrap(); + writeln!(out).unwrap(); + + // Methods on this sub-resource + render_method_list(out, &sub_path, &sub_resource.methods); + + // Recurse into nested sub-resources + render_sub_resources(out, &sub_path, sub_resource); + } +} + +/// Render concrete examples for common operations. 
+fn render_examples(out: &mut String, name: &str, resource: &DiscoveryResource) { + match name { + "objects" => { + writeln!(out, "```bash").unwrap(); + writeln!(out, "# List all objects").unwrap(); + writeln!(out, "analyzer api objects list").unwrap(); + writeln!(out).unwrap(); + if resource.methods.contains_key("get") { + writeln!(out, "# Get a specific object").unwrap(); + writeln!( + out, + "analyzer api objects get --params '{{\"id\": \"OBJ_ID\"}}'" + ) + .unwrap(); + writeln!(out).unwrap(); + } + if resource.methods.contains_key("create") { + writeln!(out, "# Create an object (dry-run first)").unwrap(); + writeln!( + out, + "analyzer api objects create --json '{{\"name\": \"Router FW\", \"description\": \"Edge router firmware\"}}' --dry-run" + ).unwrap(); + writeln!(out).unwrap(); + } + if resource.methods.contains_key("delete") { + writeln!(out, "# Delete an object").unwrap(); + writeln!( + out, + "analyzer api objects delete --params '{{\"id\": \"OBJ_ID\"}}' --dry-run" + ) + .unwrap(); + } + writeln!(out, "```").unwrap(); + } + "scans" => { + writeln!(out, "```bash").unwrap(); + writeln!(out, "# List scans").unwrap(); + writeln!(out, "analyzer api scans list").unwrap(); + writeln!(out).unwrap(); + writeln!(out, "# Schedule a scan (dry-run first)").unwrap(); + writeln!(out, "analyzer api scans create --json '{{...}}' --dry-run").unwrap(); + writeln!(out).unwrap(); + writeln!(out, "# Poll scan status").unwrap(); + writeln!( + out, + "analyzer api scans status list --params '{{\"id\": \"SCAN_ID\"}}'" + ) + .unwrap(); + writeln!(out).unwrap(); + writeln!(out, "# Get security score").unwrap(); + writeln!( + out, + "analyzer api scans score list --params '{{\"id\": \"SCAN_ID\"}}'" + ) + .unwrap(); + writeln!(out).unwrap(); + writeln!(out, "# Get scan overview").unwrap(); + writeln!( + out, + "analyzer api scans overview list --params '{{\"id\": \"SCAN_ID\"}}'" + ) + .unwrap(); + writeln!(out).unwrap(); + writeln!(out, "# Get CVE results").unwrap(); + writeln!( + 
out, + "analyzer api scans results get --params '{{\"scan_id\": \"SCAN_ID\", \"analysis_id\": \"cve\"}}'" + ).unwrap(); + writeln!(out).unwrap(); + writeln!(out, "# Check CRA compliance").unwrap(); + writeln!( + out, + "analyzer api scans compliance-check cyber-resilience-act list --params '{{\"id\": \"SCAN_ID\"}}'" + ).unwrap(); + writeln!(out, "```").unwrap(); + } + "health" => { + writeln!(out, "```bash").unwrap(); + writeln!(out, "# Check service health").unwrap(); + writeln!(out, "analyzer api health list").unwrap(); + writeln!(out, "```").unwrap(); + } + _ => { + writeln!(out, "```bash").unwrap(); + if resource.methods.contains_key("list") { + writeln!(out, "analyzer api {name} list").unwrap(); + } + if resource.methods.contains_key("get") { + writeln!( + out, + "analyzer api {name} get --params '{{\"id\": \"...\"}}'", + ) + .unwrap(); + } + writeln!(out, "```").unwrap(); + } + } + writeln!(out).unwrap(); +} + +/// Render the shared SKILL.md covering global flags, auth, and error handling. +fn render_shared_skill(version: &str) -> String { + format!( + r#"--- +name: analyzer-shared +version: {version} +description: "Analyzer CLI: Shared patterns for authentication, global flags, and error handling." 
+metadata: + openclaw: + category: "security" + requires: + bins: ["analyzer"] +--- + +# analyzer — Shared Reference + +## Authentication + +```bash +# Interactive login (prompts for API key, validates, saves) +analyzer login + +# Environment variable +export ANALYZER_API_KEY="your-api-key" +``` + +## Global Flags + +| Flag | Description | +|------|-------------| +| `--params ''` | Path and query parameters | +| `--json ''` | Request body for POST/PUT/PATCH | +| `--dry-run` | Validate and print request without executing | +| `--discovery ` | Discovery document location (also: `ANALYZER_DISCOVERY_URL` env var) | +| `--format ` | Output format: `human` (default), `json` | + +## CLI Syntax + +```bash +analyzer api [sub-resource] [flags] +``` + +## Schema Introspection + +Before calling any API method, inspect it: + +```bash +# Browse all resources +analyzer schema api + +# Inspect a specific method +analyzer schema api.scans.create + +# Browse a resource's methods +analyzer schema api.scans.compliance-check +``` + +Use `analyzer schema` output to build your `--params` and `--json` flags. + +## Security Rules + +- **Always** use `--dry-run` for mutating operations (create, update, delete) before actual execution +- **Always** confirm with user before executing write/delete commands +- Prefer `--fields` to limit response size and protect the context window +- Poll scan status — do not guess when a scan completes + +## Error Handling + +All errors are JSON on stderr with a non-zero exit code: + +```json +{{"error": {{"code": 404, "message": "Object not found"}}}} +``` + +Check the exit code: `0` = success, non-zero = failure. Parse the error JSON to decide next steps. Do not retry without understanding the error. 
+"# + ) +} From 63681bb631df5a4e23cbeb006140f2ad7b62ee1c Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Tue, 17 Mar 2026 15:03:09 +0100 Subject: [PATCH 03/38] feat(dommyrock-analyzer-cli): generate skills files --- docs/skills.md | 11 +++ skills/analyzer-health/SKILL.md | 58 +++++++++++++ skills/analyzer-objects/SKILL.md | 75 ++++++++++++++++ skills/analyzer-scans/SKILL.md | 143 +++++++++++++++++++++++++++++++ skills/analyzer-shared/SKILL.md | 72 ++++++++++++++++ 5 files changed, 359 insertions(+) create mode 100644 docs/skills.md create mode 100644 skills/analyzer-health/SKILL.md create mode 100644 skills/analyzer-objects/SKILL.md create mode 100644 skills/analyzer-scans/SKILL.md create mode 100644 skills/analyzer-shared/SKILL.md diff --git a/docs/skills.md b/docs/skills.md new file mode 100644 index 0000000..412226b --- /dev/null +++ b/docs/skills.md @@ -0,0 +1,11 @@ +# Skills Index + +> Auto-generated by `analyzer generate-skills`. Do not edit manually. + +| Skill | Description | +|-------|-------------| +| [analyzer-shared](../skills/analyzer-shared/SKILL.md) | Shared patterns for authentication, global flags, and error handling. | +| [analyzer-health](../skills/analyzer-health/SKILL.md) | Check API service health. | +| [analyzer-objects](../skills/analyzer-objects/SKILL.md) | Manage firmware/software objects (CRUD operations). | +| [analyzer-scans](../skills/analyzer-scans/SKILL.md) | Manage scans, results, compliance checks, reports, and documents. | + diff --git a/skills/analyzer-health/SKILL.md b/skills/analyzer-health/SKILL.md new file mode 100644 index 0000000..87481b7 --- /dev/null +++ b/skills/analyzer-health/SKILL.md @@ -0,0 +1,58 @@ +--- +name: analyzer-health +version: 0.5.0 +description: "Check API service health." 
+metadata: + openclaw: + category: "security" + requires: + bins: ["analyzer"] + cliHelp: "analyzer api health --help" +--- + +# analyzer health + +> **PREREQUISITE:** Read `../analyzer-shared/SKILL.md` for auth, global flags, and error handling. + +```bash +analyzer api health [flags] +``` + +## API Methods + + - `list` (GET) — Returns if the service is in an healthy state. + +## Flags + +| Flag | Purpose | +|------|---------| +| `--params ''` | Path and query parameters (e.g., `id`, `limit`, `page`) | +| `--json ''` | Request body for POST/PUT/PATCH methods | +| `--dry-run` | Print the request without executing | + +## Discovering Commands + +Before calling any API method, inspect it: + +```bash +# Browse resources and methods +analyzer api health --help + +# Inspect a method's required params, types, and defaults +analyzer schema api.health. +``` + +Use `analyzer schema` output to build your `--params` and `--json` flags. + +## Examples + +```bash +# Check service health +analyzer api health list +``` + +## See Also + +- [analyzer-shared](../analyzer-shared/SKILL.md) — Global flags and auth +- [CONTEXT.md](../../CONTEXT.md) — Full agent reference + diff --git a/skills/analyzer-objects/SKILL.md b/skills/analyzer-objects/SKILL.md new file mode 100644 index 0000000..dd74682 --- /dev/null +++ b/skills/analyzer-objects/SKILL.md @@ -0,0 +1,75 @@ +--- +name: analyzer-objects +version: 0.5.0 +description: "Manage firmware/software objects (CRUD operations)." +metadata: + openclaw: + category: "security" + requires: + bins: ["analyzer"] + cliHelp: "analyzer api objects --help" +--- + +# analyzer objects + +> **PREREQUISITE:** Read `../analyzer-shared/SKILL.md` for auth, global flags, and error handling. + +```bash +analyzer api objects [flags] +``` + +## API Methods + + - `create` (POST) — Create new object + - `delete` (DELETE) — Deletes a object and all related scans. + - `get` (GET) — Retrieve an object by its ID. 
+ - `list` (GET) — Retrieve a list of all objects of the current user. + - `update` (PUT) — Update an object + +### scans + + - `list` (GET) — Those scans could be + +## Flags + +| Flag | Purpose | +|------|---------| +| `--params ''` | Path and query parameters (e.g., `id`, `limit`, `page`) | +| `--json ''` | Request body for POST/PUT/PATCH methods | +| `--dry-run` | Print the request without executing | + +## Discovering Commands + +Before calling any API method, inspect it: + +```bash +# Browse resources and methods +analyzer api objects --help + +# Inspect a method's required params, types, and defaults +analyzer schema api.objects. +``` + +Use `analyzer schema` output to build your `--params` and `--json` flags. + +## Examples + +```bash +# List all objects +analyzer api objects list + +# Get a specific object +analyzer api objects get --params '{"id": "OBJ_ID"}' + +# Create an object (dry-run first) +analyzer api objects create --json '{"name": "Router FW", "description": "Edge router firmware"}' --dry-run + +# Delete an object +analyzer api objects delete --params '{"id": "OBJ_ID"}' --dry-run +``` + +## See Also + +- [analyzer-shared](../analyzer-shared/SKILL.md) — Global flags and auth +- [CONTEXT.md](../../CONTEXT.md) — Full agent reference + diff --git a/skills/analyzer-scans/SKILL.md b/skills/analyzer-scans/SKILL.md new file mode 100644 index 0000000..b1438a4 --- /dev/null +++ b/skills/analyzer-scans/SKILL.md @@ -0,0 +1,143 @@ +--- +name: analyzer-scans +version: 0.5.0 +description: "Manage scans, results, compliance checks, reports, and documents." +metadata: + openclaw: + category: "security" + requires: + bins: ["analyzer"] + cliHelp: "analyzer api scans --help" +--- + +# analyzer scans + +> **PREREQUISITE:** Read `../analyzer-shared/SKILL.md` for auth, global flags, and error handling. + +```bash +analyzer api scans [flags] +``` + +## API Methods + + - `create` (POST) — Schedule a new scan. + - `delete` (DELETE) — Delete a scan. 
+ - `get` (GET) — Returns a scan. + - `list` (GET) — Retrieve a list of scans. + +### cancel + + - `create` (POST) — This can be used to cancel an already pending or running scan. + +### compliance-check + +### cyber-resilience-act + + - `list` (GET) — Computes compliance with Cyber Resilience Act + +### ai-suggestion + +### begin + + - `create` (POST) — Triggers CRA AI suggestion using user-provided documents. + +### status + + - `list` (GET) — Returns status of the CRA AI suggestion. + +### overwrite + + - `overwrite_compliance_check_requirement` (PUT) — Overwrites compliance check requirement + +### report + + - `list` (GET) — Downloads Cyber Resilience Act compliance report as PDF + +### documents + + - `create` (POST) — Upload a document for a scan. + - `delete` (DELETE) — Delete a single document for a scan. + - `delete_documents` (DELETE) — Delete all documents for a scan. + - `list` (GET) — List documents for a scan. + +### overview + + - `get` (GET) — Returns an overview of one analysis. + - `list` (GET) — Returns an aggregated overview of all analysis executed for one scan. + +### report + + - `list` (GET) — + +### results + + - `get` (GET) — Retrieve the results of one specific analysis of a scan. + +### sbom + + - `list` (GET) — + +### score + + - `list` (GET) — Returns a security score of all successful finished analyses with their individual scores included. + +### status + + - `list` (GET) — Returns the status of a scan. + +### types + + - `list` (GET) — Returns a list of all available analysis types for each different image. 
+ +## Flags + +| Flag | Purpose | +|------|---------| +| `--params ''` | Path and query parameters (e.g., `id`, `limit`, `page`) | +| `--json ''` | Request body for POST/PUT/PATCH methods | +| `--dry-run` | Print the request without executing | + +## Discovering Commands + +Before calling any API method, inspect it: + +```bash +# Browse resources and methods +analyzer api scans --help + +# Inspect a method's required params, types, and defaults +analyzer schema api.scans. +``` + +Use `analyzer schema` output to build your `--params` and `--json` flags. + +## Examples + +```bash +# List scans +analyzer api scans list + +# Schedule a scan (dry-run first) +analyzer api scans create --json '{...}' --dry-run + +# Poll scan status +analyzer api scans status list --params '{"id": "SCAN_ID"}' + +# Get security score +analyzer api scans score list --params '{"id": "SCAN_ID"}' + +# Get scan overview +analyzer api scans overview list --params '{"id": "SCAN_ID"}' + +# Get CVE results +analyzer api scans results get --params '{"scan_id": "SCAN_ID", "analysis_id": "cve"}' + +# Check CRA compliance +analyzer api scans compliance-check cyber-resilience-act list --params '{"id": "SCAN_ID"}' +``` + +## See Also + +- [analyzer-shared](../analyzer-shared/SKILL.md) — Global flags and auth +- [CONTEXT.md](../../CONTEXT.md) — Full agent reference + diff --git a/skills/analyzer-shared/SKILL.md b/skills/analyzer-shared/SKILL.md new file mode 100644 index 0000000..041e656 --- /dev/null +++ b/skills/analyzer-shared/SKILL.md @@ -0,0 +1,72 @@ +--- +name: analyzer-shared +version: 0.5.0 +description: "Analyzer CLI: Shared patterns for authentication, global flags, and error handling." 
+metadata: + openclaw: + category: "security" + requires: + bins: ["analyzer"] +--- + +# analyzer — Shared Reference + +## Authentication + +```bash +# Interactive login (prompts for API key, validates, saves) +analyzer login + +# Environment variable +export ANALYZER_API_KEY="your-api-key" +``` + +## Global Flags + +| Flag | Description | +|------|-------------| +| `--params ''` | Path and query parameters | +| `--json ''` | Request body for POST/PUT/PATCH | +| `--dry-run` | Validate and print request without executing | +| `--discovery ` | Discovery document location (also: `ANALYZER_DISCOVERY_URL` env var) | +| `--format ` | Output format: `human` (default), `json` | + +## CLI Syntax + +```bash +analyzer api [sub-resource] [flags] +``` + +## Schema Introspection + +Before calling any API method, inspect it: + +```bash +# Browse all resources +analyzer schema api + +# Inspect a specific method +analyzer schema api.scans.create + +# Browse a resource's methods +analyzer schema api.scans.compliance-check +``` + +Use `analyzer schema` output to build your `--params` and `--json` flags. + +## Security Rules + +- **Always** use `--dry-run` for mutating operations (create, update, delete) before actual execution +- **Always** confirm with user before executing write/delete commands +- Prefer `--fields` to limit response size and protect the context window +- Poll scan status — do not guess when a scan completes + +## Error Handling + +All errors are JSON on stderr with a non-zero exit code: + +```json +{"error": {"code": 404, "message": "Object not found"}} +``` + +Check the exit code: `0` = success, non-zero = failure. Parse the error JSON to decide next steps. Do not retry without understanding the error. 
From 8774a4618e7f84742c69c690dc98eaa4b2ed04fc Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Tue, 17 Mar 2026 16:37:44 +0100 Subject: [PATCH 04/38] feat(dommyrock-analyzer-cli): multi service api support --- .github/workflows/update-discovery.yml | 55 +- CONTEXT.md | 132 +- analyzer-discovery.json | 2227 +----------------------- docs/skills.md | 9 +- skills/analyzer-health/SKILL.md | 20 +- skills/analyzer-objects/SKILL.md | 35 +- skills/analyzer-scans/SKILL.md | 45 +- skills/shared/SKILL.md | 84 + src/api/generate_skills.rs | 564 +++--- src/api/schema.rs | 15 +- src/discovery.rs | 42 + src/main.rs | 94 +- src/services.rs | 72 + 13 files changed, 797 insertions(+), 2597 deletions(-) create mode 100644 skills/shared/SKILL.md create mode 100644 src/services.rs diff --git a/.github/workflows/update-discovery.yml b/.github/workflows/update-discovery.yml index 9c98caa..8091f02 100644 --- a/.github/workflows/update-discovery.yml +++ b/.github/workflows/update-discovery.yml @@ -1,4 +1,4 @@ -name: Update Discovery Document +name: Update Discovery Documents on: schedule: @@ -9,28 +9,45 @@ permissions: contents: write pull-requests: write +env: + SERVICES: | + analyzer=https://analyzer.exein.dev/analyzer-discovery.json + # isaac=https://analyzer.exein.dev/isaac-discovery.json + # vuln-tracker=https://analyzer.exein.dev/vuln-tracker-discovery.json + jobs: update: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - name: Fetch latest discovery.json + - name: Fetch all discovery documents run: | - curl -sf https://analyzer.exein.dev/discovery.json \ - -H "Authorization: Bearer ${{ secrets.ANALYZER_API_KEY }}" \ - -o analyzer-discovery.json.new + mkdir -p discovery + while IFS='=' read -r name url; do + [ -z "$name" ] && continue + [[ "$name" == \#* ]] && continue + echo "Fetching $name from $url" + curl -sf "$url" \ + -H "Authorization: Bearer ${{ secrets.ANALYZER_API_KEY }}" \ + -o "discovery/${name}.json.new" || echo "WARN: failed to fetch $name" + done <<< 
"$SERVICES" - name: Check for changes id: diff run: | - if diff -q analyzer-discovery.json analyzer-discovery.json.new > /dev/null 2>&1; then - echo "changed=false" >> "$GITHUB_OUTPUT" - rm analyzer-discovery.json.new - else - echo "changed=true" >> "$GITHUB_OUTPUT" - mv analyzer-discovery.json.new analyzer-discovery.json - fi + changed=false + for f in discovery/*.json.new; do + [ ! -f "$f" ] && continue + base="${f%.new}" + if ! diff -q "$base" "$f" > /dev/null 2>&1; then + mv "$f" "$base" + changed=true + else + rm "$f" + fi + done + echo "changed=$changed" >> "$GITHUB_OUTPUT" - name: Install Rust toolchain if: steps.diff.outputs.changed == 'true' @@ -50,18 +67,24 @@ jobs: if: steps.diff.outputs.changed == 'true' run: | cargo build --release - ./target/release/analyzer --discovery ./analyzer-discovery.json generate-skills + for f in discovery/*.json; do + [ ! -f "$f" ] && continue + name="$(basename "$f" .json)" + echo "Generating skills for $name" + ./target/release/analyzer --discovery "$f" generate-skills \ + || echo "WARN: failed to generate skills for $name" + done - name: Create PR if: steps.diff.outputs.changed == 'true' uses: peter-evans/create-pull-request@v6 with: - title: "update: Discovery Document + skills" + title: "update: Discovery Documents + skills" body: | Auto-generated from upstream API changes. - The discovery document at `analyzer-discovery.json` has changed. + Discovery documents in `discovery/` have changed. Skill files in `skills/` have been regenerated. branch: update-discovery - commit-message: "update: discovery.json + regenerated skills" + commit-message: "update: discovery documents + regenerated skills" delete-branch: true diff --git a/CONTEXT.md b/CONTEXT.md index 59e7a9e..d101a6e 100644 --- a/CONTEXT.md +++ b/CONTEXT.md @@ -1,31 +1,52 @@ # Analyzer CLI (`analyzer`) Context -The `analyzer` CLI provides dynamic access to a firmware and software image security analysis API by parsing a Discovery Document at runtime. 
It manages objects (firmware images), schedules security scans, retrieves vulnerability findings (CVE, hardening, crypto, SBOM, malware, kernel), checks compliance (Cyber Resilience Act), and generates reports — all driven from a single `discovery.json` with no hardcoded commands. +The `analyzer` CLI provides dynamic access to firmware and software security APIs by parsing Discovery Documents at runtime. It supports multiple API services through a compile-time service registry, with each service backed by its own Discovery Document fetched and cached automatically. + +## Registered Services + +| Alias | Description | +|-------|-------------| +| `analyzer` | Firmware and software image security analysis | + + ## Rules of Engagement for Agents -* **Schema Discovery:** *If you don't know the exact JSON payload structure, run `analyzer schema .` first to inspect the schema before executing.* +* **Schema Discovery:** *If you don't know the exact JSON payload structure, run `analyzer schema ..` first to inspect the schema before executing.* * **Context Window Protection:** *Scan results and overview responses can be large. ALWAYS use `--fields` when listing or getting resources to avoid overwhelming your context window.* * **Dry-Run Safety:** *Always use the `--dry-run` flag for mutating operations (create, update, delete) to validate your JSON payload before actual execution.* -* **Poll, Don't Guess:** *After scheduling a scan, poll `analyzer api scans status list` until it completes. Do not assume timing or make further requests against incomplete scans.* +* **Poll, Don't Guess:** *After scheduling a scan, poll `analyzer api analyzer scans status list` until it completes. 
Do not assume timing or make further requests against incomplete scans.* * **One Step at a Time:** *Verify each step succeeded (exit code 0, valid JSON response) before proceeding to the next.* ## Core Syntax +### Multi-API commands (service name is first positional arg) + +```bash +analyzer api [sub-resource...] [flags] +``` + +### Schema introspection (service name is first dotted segment) + +```bash +analyzer schema .. +``` + +### Generate skills for all registered services + ```bash -analyzer api [sub-resource...] [flags] +analyzer generate-skills ``` -Use `--help` to get help on the available commands. +### Navigation / help ```bash analyzer --help -analyzer api --help -analyzer api objects --help -analyzer api scans --help -analyzer api scans overview --help -analyzer api scans compliance-check cyber-resilience-act --help -analyzer api scans compliance-check cyber-resilience-act ai-suggestion --help +analyzer api analyzer --help +analyzer api analyzer objects --help +analyzer api analyzer scans --help +analyzer api analyzer scans overview --help +analyzer api analyzer scans compliance-check cyber-resilience-act --help ``` ### Key Flags @@ -35,6 +56,7 @@ analyzer api scans compliance-check cyber-resilience-act ai-suggestion --help - `--fields ''`: Limits the response fields (critical for AI context window efficiency). - `--dry-run`: Validates and prints the request without executing. Use before every mutation. - `--output json|table`: Output format. Default: `json`. Agents should always use `json`. +- `--discovery `: Override the discovery source for dev/testing (bypasses service registry). ## Usage Patterns @@ -44,16 +66,16 @@ Always use `--fields` to minimize tokens. 
```bash # List objects with field mask -analyzer api objects list --params '{"limit": 10}' --fields "id,name,tags" +analyzer api analyzer objects list --params '{"limit": 10}' --fields "id,name,tags" # Get a single object -analyzer api objects get --params '{"id": "OBJ_ID"}' +analyzer api analyzer objects get --params '{"id": "OBJ_ID"}' # Get scan details -analyzer api scans get --params '{"id": "SCAN_ID"}' +analyzer api analyzer scans get --params '{"id": "SCAN_ID"}' # Check service health -analyzer api health list +analyzer api analyzer health list ``` ### 2. Writing Data (POST/PUT/PATCH) @@ -62,13 +84,13 @@ Use `--json` for the request body. Always `--dry-run` first. ```bash # Create an object (firmware image) -analyzer api objects create --json '{"name": "Router FW v2.1", "description": "Edge router firmware", "tags": ["router", "v2.1"]}' --dry-run +analyzer api analyzer objects create --json '{"name": "Router FW v2.1", "description": "Edge router firmware", "tags": ["router", "v2.1"]}' --dry-run # Update an object -analyzer api objects update --params '{"id": "OBJ_ID"}' --json '{"name": "Router FW v2.1.1", "favorite": true}' --dry-run +analyzer api analyzer objects update --params '{"id": "OBJ_ID"}' --json '{"name": "Router FW v2.1.1", "favorite": true}' --dry-run # Schedule a new scan -analyzer api scans create --json '{...}' --dry-run +analyzer api analyzer scans create --json '{...}' --dry-run ``` ### 3. Deleting Data @@ -77,16 +99,16 @@ Always `--dry-run` first. Deletions are irreversible. 
```bash # Delete an object and all its scans -analyzer api objects delete --params '{"id": "OBJ_ID"}' --dry-run +analyzer api analyzer objects delete --params '{"id": "OBJ_ID"}' --dry-run # Delete a scan -analyzer api scans delete --params '{"id": "SCAN_ID"}' --dry-run +analyzer api analyzer scans delete --params '{"id": "SCAN_ID"}' --dry-run # Delete a single document -analyzer api scans documents delete --params '{"id": "SCAN_ID", "file_name": "datasheet.pdf"}' --dry-run +analyzer api analyzer scans documents delete --params '{"id": "SCAN_ID", "file_name": "datasheet.pdf"}' --dry-run # Delete all documents for a scan -analyzer api scans documents delete_documents --params '{"id": "SCAN_ID"}' --dry-run +analyzer api analyzer scans documents delete_documents --params '{"id": "SCAN_ID"}' --dry-run ``` ### 4. Schema Introspection @@ -94,10 +116,10 @@ analyzer api scans documents delete_documents --params '{"id": "SCAN_ID"}' --dry If unsure about parameters or body structure, check the schema: ```bash -analyzer schema api.objects.create -analyzer schema api.scans.create -analyzer schema api.scans.results.get -analyzer schema api.scans.compliance-check.cyber-resilience-act.list +analyzer schema analyzer.objects.create +analyzer schema analyzer.scans.create +analyzer schema analyzer.scans.results.get +analyzer schema analyzer.scans.compliance-check.cyber-resilience-act.list ``` ### 5. Scan Lifecycle (Poll-Based) @@ -106,73 +128,95 @@ Scans are asynchronous. Schedule, poll, then retrieve results. 
```bash # Schedule -analyzer api scans create --json '{...}' +analyzer api analyzer scans create --json '{...}' # Poll status until complete -analyzer api scans status list --params '{"id": "SCAN_ID"}' +analyzer api analyzer scans status list --params '{"id": "SCAN_ID"}' # → {"id": "SCAN_ID", "status": "running"} # → {"id": "SCAN_ID", "status": "finished"} # Cancel if needed -analyzer api scans cancel create --params '{"id": "SCAN_ID"}' +analyzer api analyzer scans cancel create --params '{"id": "SCAN_ID"}' ``` ### 6. Scan Results and Scoring ```bash # Aggregated overview (CVE, hardening, crypto, SBOM, malware, kernel, etc.) -analyzer api scans overview list --params '{"id": "SCAN_ID"}' +analyzer api analyzer scans overview list --params '{"id": "SCAN_ID"}' # Single analysis overview -analyzer api scans overview get --params '{"scan_id": "SCAN_ID", "analysis_id": "ANALYSIS_ID"}' +analyzer api analyzer scans overview get --params '{"scan_id": "SCAN_ID", "analysis_id": "ANALYSIS_ID"}' # Security score -analyzer api scans score list --params '{"id": "SCAN_ID"}' +analyzer api analyzer scans score list --params '{"id": "SCAN_ID"}' # Detailed findings with pagination and filtering -analyzer api scans results get --params '{"scan_id": "SCAN_ID", "analysis_id": "cve", "page": 1, "per-page": 50}' +analyzer api analyzer scans results get --params '{"scan_id": "SCAN_ID", "analysis_id": "cve", "page": 1, "per-page": 50}' # SBOM export -analyzer api scans sbom list --params '{"id": "SCAN_ID"}' +analyzer api analyzer scans sbom list --params '{"id": "SCAN_ID"}' # Full report download -analyzer api scans report list --params '{"id": "SCAN_ID"}' +analyzer api analyzer scans report list --params '{"id": "SCAN_ID"}' ``` ### 7. 
Compliance — Cyber Resilience Act (CRA) ```bash # Get CRA compliance report -analyzer api scans compliance-check cyber-resilience-act list --params '{"id": "SCAN_ID"}' +analyzer api analyzer scans compliance-check cyber-resilience-act list --params '{"id": "SCAN_ID"}' # Download CRA report as PDF -analyzer api scans compliance-check cyber-resilience-act report list --params '{"id": "SCAN_ID"}' +analyzer api analyzer scans compliance-check cyber-resilience-act report list --params '{"id": "SCAN_ID"}' # Overwrite a requirement (manual assessment) -analyzer api scans compliance-check cyber-resilience-act overwrite overwrite_compliance_check_requirement \ +analyzer api analyzer scans compliance-check cyber-resilience-act overwrite overwrite_compliance_check_requirement \ --params '{"id": "SCAN_ID"}' --json '{...}' --dry-run # Trigger AI suggestion -analyzer api scans compliance-check cyber-resilience-act ai-suggestion begin create --params '{"id": "SCAN_ID"}' +analyzer api analyzer scans compliance-check cyber-resilience-act ai-suggestion begin create --params '{"id": "SCAN_ID"}' # Poll AI suggestion status -analyzer api scans compliance-check cyber-resilience-act ai-suggestion status list --params '{"id": "SCAN_ID"}' +analyzer api analyzer scans compliance-check cyber-resilience-act ai-suggestion status list --params '{"id": "SCAN_ID"}' ``` ### 8. 
Documents (Scan Attachments) ```bash # List documents for a scan -analyzer api scans documents list --params '{"id": "SCAN_ID"}' +analyzer api analyzer scans documents list --params '{"id": "SCAN_ID"}' # Upload a document -analyzer api scans documents create --params '{"id": "SCAN_ID"}' +analyzer api analyzer scans documents create --params '{"id": "SCAN_ID"}' # Delete a single document -analyzer api scans documents delete --params '{"id": "SCAN_ID", "file_name": "FILE"}' --dry-run +analyzer api analyzer scans documents delete --params '{"id": "SCAN_ID", "file_name": "FILE"}' --dry-run # Delete all documents -analyzer api scans documents delete_documents --params '{"id": "SCAN_ID"}' --dry-run +analyzer api analyzer scans documents delete_documents --params '{"id": "SCAN_ID"}' --dry-run +``` + +### 9. Dev/Testing with Local Discovery Document + +```bash +# Override discovery source — bypasses service registry +analyzer --discovery ./analyzer-discovery.json api analyzer scans list + +# Or with a URL +analyzer --discovery https://staging.example.com/discovery.json api analyzer scans list +``` + +## Human-Friendly Commands (Unchanged) + +The classic CLI subcommands work the same as before: + +```bash +analyzer login +analyzer whoami +analyzer object list +analyzer scan new --object OBJ_ID -f firmware.bin -t linux --wait +analyzer scan score --scan SCAN_ID ``` ## Error Handling diff --git a/analyzer-discovery.json b/analyzer-discovery.json index 5e4f996..bf51dfb 100644 --- a/analyzer-discovery.json +++ b/analyzer-discovery.json @@ -1,2226 +1 @@ -{ - "kind": "discovery#restDescription", - "discoveryVersion": "v1", - "id": "analyzer-api-routes:0.5.0", - "name": "analyzer-api-routes", - "version": "0.5.0", - "title": "Analyzer API routes", - "description": "", - "protocol": "rest", - "rootUrl": "", - "servicePath": "", - "schemas": { - "AiResult": { - "id": "AiResult", - "properties": { - "reasoning": { - "description": "AI reasoning, which lead to current status", - 
"type": "string" - }, - "sources": { - "description": "List of documents used by AI to produce current status.", - "items": { - "$ref": "UserUploadedDocument" - }, - "type": "array" - }, - "status": { - "$ref": "AiStatus" - }, - "user-action": { - "$ref": "SuggestionResponse" - } - }, - "type": "object" - }, - "AiStatus": { - "description": "Represents the status of a requirement determined by ai", - "enum": [ - "passed", - "failed", - "unknown" - ], - "id": "AiStatus", - "type": "string" - }, - "AiSuggestionStatus": { - "description": "Status of the AI suggestions computation.", - "id": "AiSuggestionStatus", - "properties": { - "status": { - "$ref": "Status" - } - }, - "type": "object" - }, - "AnalysisFilter": { - "id": "AnalysisFilter", - "properties": { - "query-name": { - "$ref": "QueryName" - }, - "values": { - "description": "Avaliable filter values with their count.", - "items": { - "$ref": "FilterValue" - }, - "type": "array" - } - }, - "type": "object" - }, - "AnalysisFindings": { - "description": "Wrapper type similar to AnalysisResult, but it contains only `findings`\nportion of analysis.", - "id": "AnalysisFindings" - }, - "AnalysisId": { - "description": "A wrapper struct `AnalysisId` around a UUID.\n ID in the analysis table.", - "format": "uuid", - "id": "AnalysisId", - "type": "string" - }, - "AnalysisInfo": { - "description": "Helper struct to define if a analysis should be by default enabled", - "id": "AnalysisInfo", - "properties": { - "default": { - "type": "boolean" - }, - "type": { - "type": "string" - } - }, - "type": "object" - }, - "AnalysisOverview": { - "description": "Like [`ScanOverview`] but for single analysis.", - "id": "AnalysisOverview" - }, - "AnalysisQueryUnion": { - "description": "Union of all available query parameters for analyses.", - "id": "AnalysisQueryUnion" - }, - "AnalysisResultDTO": { - "description": "AnalysisResult but with count of all findings,\nbefore pagination was applied.", - "id": "AnalysisResultDTO", - 
"properties": { - "filters": { - "description": "Filters that can be used in this analysis.", - "type": "object" - }, - "findings": { - "$ref": "AnalysisFindings" - }, - "total-findings": { - "description": "Total count of findings _after_ filtering, but _before_ pagination.", - "format": "int64", - "type": "integer" - } - }, - "type": "object" - }, - "AnalysisScore": { - "description": "The score of an analysis,", - "id": "AnalysisScore", - "properties": { - "id": { - "$ref": "AnalysisId" - }, - "score": { - "$ref": "Score" - }, - "type": { - "$ref": "AnalysisType" - } - }, - "type": "object" - }, - "AnalysisState": { - "description": "A analysis that runs for one particular system image.", - "id": "AnalysisState", - "properties": { - "id": { - "$ref": "AnalysisId" - }, - "status": { - "$ref": "AnalysisStatus" - }, - "type": { - "$ref": "ScanType" - } - }, - "type": "object" - }, - "AnalysisStatus": { - "description": "Represents the current execution status of an analysis task.", - "enum": [ - "success", - "pending", - "in-progress", - "canceled", - "error" - ], - "id": "AnalysisStatus", - "type": "string" - }, - "AnalysisType": { - "description": "Type of the analysis", - "enum": [ - "info", - "kernel", - "cve", - "password-hash", - "hardening", - "malware", - "software-bom", - "crypto", - "capabilities", - "symbols", - "tasks", - "stack-overflow" - ], - "id": "AnalysisType", - "type": "string" - }, - "AnalyzerResult": { - "id": "AnalyzerResult", - "properties": { - "status": { - "$ref": "AnalyzerStatus" - } - }, - "type": "object" - }, - "AnalyzerStatus": { - "description": "Represents the status of a requirement determined by analyzer", - "enum": [ - "passed", - "failed", - "unknown", - "not-applicable" - ], - "id": "AnalyzerStatus", - "type": "string" - }, - "ApiScanType": { - "description": "List of available analysis types per image type.\n\nThis includes the information if a analysis type should be scheduled by default or not.\n\n# Note\n\nThis is used by 
the frontend to determine which analysis has to be scheduled implicitly\nand which types are optional.", - "id": "ApiScanType" - }, - "BindFilter": { - "enum": [ - "local", - "global", - "weak" - ], - "id": "BindFilter", - "type": "string" - }, - "CapabilitiesOverview": { - "description": "Overview for Capability analysis.", - "id": "CapabilitiesOverview", - "properties": { - "capabilities": { - "description": "Capability found and their number of occurrences.", - "type": "object" - }, - "counts": { - "$ref": "RiskLevelCount" - }, - "executable_count": { - "description": "Total number executables.", - "format": "int64", - "type": "integer" - } - }, - "type": "object" - }, - "CapabilityParams": { - "id": "CapabilityParams", - "properties": { - "search": { - "type": "string" - }, - "severity-filter": { - "items": { - "$ref": "SeverityFilter" - }, - "type": "array" - }, - "sort-by": { - "$ref": "CapabilitySortBy" - }, - "sort-ord": { - "$ref": "SortOrd" - } - }, - "type": "object" - }, - "CapabilitySortBy": { - "enum": [ - "severity" - ], - "id": "CapabilitySortBy", - "type": "string" - }, - "Checks": { - "description": "Represents the checks performed in the report", - "id": "Checks", - "properties": { - "failed": { - "description": "Number of checks that failed (determined either by analyzer or overwritten by the user)", - "format": "int32", - "type": "integer" - }, - "not-applicable": { - "description": "Number of not applicable requirements", - "format": "int32", - "type": "integer" - }, - "passed": { - "description": "Number of checks that passed (determined either by analyzer or overwritten by the user)", - "format": "int32", - "type": "integer" - }, - "suggestion-available": { - "description": "Number of checks for which AI suggestion is available.\n\nIt does not include user accepted or rejected suggestions.", - "format": "int32", - "type": "integer" - }, - "total": { - "description": "Total number of checks performed", - "format": "int32", - "type": "integer" 
- }, - "unknown": { - "description": "Number of checks that analyzer was unable to determine\n(or ai didn't give conclusive suggestion).\n\nNote that this will also include those requirements,\nthat have ai suggestion available, but user has not approved or rejected it yet.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "ComponentType": { - "enum": [ - "application", - "framework", - "library", - "container", - "operating-system", - "device", - "firmware", - "file" - ], - "id": "ComponentType", - "type": "string" - }, - "CreateObject": { - "description": "The request to create a new object.", - "id": "CreateObject", - "properties": { - "description": { - "description": "Description of the object.", - "type": "string" - }, - "name": { - "description": "Name of the object.", - "type": "string" - }, - "tags": { - "description": "Tags associated with the object.", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "CryptoOverview": { - "description": "Overview for Crypto analysis.", - "id": "CryptoOverview", - "properties": { - "certificates": { - "description": "Number of certificates found.", - "format": "int64", - "type": "integer" - }, - "private_keys": { - "description": "Number of private keys found.", - "format": "int64", - "type": "integer" - }, - "public_keys": { - "description": "Number of public keys found.", - "format": "int64", - "type": "integer" - } - }, - "type": "object" - }, - "CryptoParams": { - "id": "CryptoParams", - "properties": { - "search": { - "type": "string" - }, - "sort-by": { - "$ref": "CryptoSortBy" - }, - "sort-ord": { - "$ref": "SortOrd" - }, - "type-filter": { - "items": { - "$ref": "CryptoTypeFilter" - }, - "type": "array" - } - }, - "type": "object" - }, - "CryptoSortBy": { - "enum": [ - "type", - "key-size", - "filename", - "path", - "issuer" - ], - "id": "CryptoSortBy", - "type": "string" - }, - "CryptoTypeFilter": { - "enum": [ - "certificate", - "private-key", 
- "public-key" - ], - "id": "CryptoTypeFilter", - "type": "string" - }, - "CveOverview": { - "description": "Overview for Cve analysis.", - "id": "CveOverview", - "properties": { - "counts": { - "$ref": "CveSeverityCount" - }, - "products": { - "description": "Cve counts for each \"product\" (binary, library, etc.).", - "type": "object" - }, - "total": { - "description": "Sum of all `counts`.", - "format": "int64", - "type": "integer" - } - }, - "type": "object" - }, - "CveParams": { - "id": "CveParams", - "properties": { - "patch-filter": { - "items": { - "$ref": "CvePatchFilter" - }, - "type": "array" - }, - "search": { - "type": "string" - }, - "severity-filter": { - "items": { - "$ref": "CveSeverityFilter" - }, - "type": "array" - }, - "sort-by": { - "$ref": "CveSortBy" - }, - "sort-ord": { - "$ref": "SortOrd" - } - }, - "type": "object" - }, - "CvePatchFilter": { - "enum": [ - "available", - "unavailable" - ], - "id": "CvePatchFilter", - "type": "string" - }, - "CveSeverityCount": { - "description": "Maps CVE severity to its count", - "id": "CveSeverityCount", - "properties": { - "critical": { - "format": "int64", - "type": "integer" - }, - "high": { - "format": "int64", - "type": "integer" - }, - "low": { - "format": "int64", - "type": "integer" - }, - "medium": { - "format": "int64", - "type": "integer" - }, - "unknown": { - "format": "int64", - "type": "integer" - } - }, - "type": "object" - }, - "CveSeverityFilter": { - "enum": [ - "low", - "medium", - "high", - "critical" - ], - "id": "CveSeverityFilter", - "type": "string" - }, - "CveSortBy": { - "enum": [ - "severity" - ], - "id": "CveSortBy", - "type": "string" - }, - "CyberResilienceActReport": { - "description": "Represents a Cyber Resilience Act report", - "id": "CyberResilienceActReport", - "properties": { - "checks": { - "$ref": "Checks" - }, - "created-at": { - "description": "Date and time when the report was created.", - "format": "date-time", - "type": "string" - }, - "name": { - 
"description": "Name of the report.", - "type": "string" - }, - "sections": { - "description": "List of categories in the report.", - "items": { - "$ref": "Section" - }, - "type": "array" - }, - "updated-at": { - "description": "Date and time of last report update.\n\nIf no update has happened yet, for example after report was generated\nand before any user overwrite, this will be `null`.", - "format": "date-time", - "type": "string" - } - }, - "type": "object" - }, - "DockerAnalysis": { - "description": "Represents different types of analyses for Docker containers.", - "enum": [ - "info", - "cve", - "password-hash", - "crypto", - "software-bom", - "malware", - "hardening", - "capabilities" - ], - "id": "DockerAnalysis", - "type": "string" - }, - "DockerInfo": { - "description": "Container metadata information\n\nRepresents various metadata attributes of a container image", - "id": "DockerInfo", - "properties": { - "arch": { - "description": "List of supported CPU architectures for the container", - "items": { - "type": "string" - }, - "type": "array" - }, - "ctime": { - "description": "List of creation timestamps for container layers", - "items": { - "type": "string" - }, - "type": "array" - }, - "env": { - "description": "List of environment variables defined in the container", - "items": { - "type": "string" - }, - "type": "array" - }, - "history": { - "description": "List of commands used to build the container layers", - "items": { - "$ref": "History" - }, - "type": "array" - }, - "os": { - "description": "List of supported operating systems for the container", - "items": { - "type": "string" - }, - "type": "array" - }, - "os_name": { - "description": "Name of the base operating system used in the container", - "type": "string" - }, - "os_version": { - "description": "Version of the base operating system used in the container", - "type": "string" - }, - "tags": { - "description": "List of container image tags associated with the image", - "items": { - "type": 
"string" - }, - "type": "array" - } - }, - "type": "object" - }, - "DockerInfoResult": { - "description": "Info result for docker image", - "id": "DockerInfoResult" - }, - "DocumentListItem": { - "description": "A single document entry in a listing.", - "id": "DocumentListItem", - "properties": { - "file-name": { - "description": "Original file name, serves as the unique key within a scan's document storage", - "type": "string" - } - }, - "type": "object" - }, - "DocumentListResponse": { - "description": "A list of documents associated with a scan.", - "id": "DocumentListResponse", - "properties": { - "documents": { - "items": { - "$ref": "DocumentListItem" - }, - "type": "array" - } - }, - "type": "object" - }, - "DocumentUploadResponse": { - "description": "The response after successfully uploading a document.", - "id": "DocumentUploadResponse", - "properties": { - "file-name": { - "description": "Original file name, serves as the unique key within a scan's document storage", - "type": "string" - } - }, - "type": "object" - }, - "FeaturesFilter": { - "enum": [ - "seccomp", - "seccomp-filter", - "security-network", - "stack-protector", - "fortify-source", - "vmap-kernel-stack", - "usercopy", - "heap-freelist-obfuscation", - "executable-memory-protection", - "kaslr", - "apparmor", - "selinux", - "smack", - "tomoyo", - "yama" - ], - "id": "FeaturesFilter", - "type": "string" - }, - "FilterValue": { - "id": "FilterValue", - "properties": { - "count": { - "description": "Count of findings matching this value for current filter options.", - "format": "int64", - "type": "integer" - }, - "value": { - "description": "Filter value that can be passed in query paramters.", - "type": "string" - } - }, - "type": "object" - }, - "HardeningOverview": { - "description": "Overview for Hardening analysis.", - "id": "HardeningOverview", - "properties": { - "counts": { - "$ref": "HardeningSeverityCount" - }, - "total": { - "description": "Sum of all `counts`.", - "format": "int64", - 
"type": "integer" - } - }, - "type": "object" - }, - "HardeningParams": { - "id": "HardeningParams", - "properties": { - "search": { - "type": "string" - }, - "severity-filter": { - "items": { - "$ref": "HardeningSeverityFilter" - }, - "type": "array" - }, - "sort-by": { - "$ref": "HardeningSortBy" - }, - "sort-ord": { - "$ref": "SortOrd" - } - }, - "type": "object" - }, - "HardeningSeverityCount": { - "description": "Maps Hardening severity to its count", - "id": "HardeningSeverityCount", - "properties": { - "high": { - "format": "int64", - "type": "integer" - }, - "low": { - "format": "int64", - "type": "integer" - }, - "medium": { - "format": "int64", - "type": "integer" - } - }, - "type": "object" - }, - "HardeningSeverityFilter": { - "enum": [ - "low", - "medium", - "high" - ], - "id": "HardeningSeverityFilter", - "type": "string" - }, - "HardeningSortBy": { - "enum": [ - "severity", - "filename", - "canary", - "nx", - "pie", - "relro", - "fortify" - ], - "id": "HardeningSortBy", - "type": "string" - }, - "HealthStatus": { - "description": "Health status of an application.\n\nIt contains an overall `healthy` field but can also provide\nthe status of individual components or an error message.\nIf the status is not healthy a Http status code of 500 will be returned.", - "id": "HealthStatus", - "properties": { - "healthy": { - "type": "boolean" - }, - "message": { - "type": "string" - } - }, - "type": "object" - }, - "History": { - "id": "History", - "properties": { - "created": { - "format": "date-time", - "type": "string" - }, - "created_by": { - "type": "string" - }, - "empty_layer": { - "type": "boolean" - } - }, - "type": "object" - }, - "IdfAnalysis": { - "description": "Represents analyses specific to IDF (IoT Device Framework) targets.", - "enum": [ - "info", - "cve", - "software-bom", - "symbols", - "tasks", - "stack-overflow" - ], - "id": "IdfAnalysis", - "type": "string" - }, - "IdfInfo": { - "description": "IdfInfo analysis entry for idf image", - 
"id": "IdfInfo", - "properties": { - "arch": { - "description": "Architecture type", - "type": "string" - }, - "compiler": { - "description": "Compiler name and version used to create this image", - "type": "string" - }, - "freertos": { - "description": "freertos version", - "type": "string" - }, - "idf": { - "description": "idf version", - "type": "string" - } - }, - "type": "object" - }, - "IdfInfoResult": { - "description": "Info result for idf image", - "id": "IdfInfoResult" - }, - "IdfSymbolParams": { - "id": "IdfSymbolParams", - "properties": { - "bind-filter": { - "items": { - "$ref": "BindFilter" - }, - "type": "array" - }, - "search": { - "type": "string" - }, - "sort-by": { - "$ref": "IdfSymbolSortBy" - }, - "sort-ord": { - "$ref": "SortOrd" - }, - "type-filter": { - "items": { - "$ref": "TypeFilter" - }, - "type": "array" - } - }, - "type": "object" - }, - "IdfSymbolSortBy": { - "enum": [ - "name" - ], - "id": "IdfSymbolSortBy", - "type": "string" - }, - "IdfTaskParams": { - "id": "IdfTaskParams", - "properties": { - "search": { - "type": "string" - }, - "sort-by": { - "$ref": "IdfTaskSortBy" - }, - "sort-ord": { - "$ref": "SortOrd" - } - }, - "type": "object" - }, - "IdfTaskSortBy": { - "enum": [ - "function", - "name" - ], - "id": "IdfTaskSortBy", - "type": "string" - }, - "Image": { - "description": "A image on which a scan is executed", - "id": "Image", - "properties": { - "file_name": { - "description": "The original name of the file as provided when the image was uploaded.\nThis is typically used for display or reference purposes and may not be unique.", - "type": "string" - }, - "id": { - "$ref": "ImageId" - } - }, - "type": "object" - }, - "ImageId": { - "description": "A wrapper struct `ImageId` around a UUID.\n ID in the images table.", - "format": "uuid", - "id": "ImageId", - "type": "string" - }, - "ImageType": { - "description": "Type of the image used in scan", - "enum": [ - "linux", - "docker", - "idf" - ], - "id": "ImageType", - "type": 
"string" - }, - "Info": { - "id": "Info" - }, - "InfoOverview": { - "id": "InfoOverview" - }, - "KernelOverview": { - "description": "Overview for Kernel analysis.", - "id": "KernelOverview", - "properties": { - "count": { - "description": "Number of kernel security features enabled.", - "format": "int64", - "type": "integer" - } - }, - "type": "object" - }, - "KernelParams": { - "id": "KernelParams", - "properties": { - "features-filter": { - "items": { - "$ref": "FeaturesFilter" - }, - "type": "array" - }, - "sort-by": { - "$ref": "KernelSortBy" - }, - "sort-ord": { - "$ref": "SortOrd" - }, - "status-filter": { - "items": { - "$ref": "StatusFilter" - }, - "type": "array" - } - }, - "type": "object" - }, - "KernelSortBy": { - "enum": [ - "features", - "status" - ], - "id": "KernelSortBy", - "type": "string" - }, - "LinuxAnalysis": { - "description": "Represents different types of analyses that can be performed on a Linux system.", - "enum": [ - "info", - "kernel", - "cve", - "password-hash", - "crypto", - "software-bom", - "malware", - "hardening", - "capabilities" - ], - "id": "LinuxAnalysis", - "type": "string" - }, - "LinuxInfo": { - "description": "Represents the information about the system", - "id": "LinuxInfo", - "properties": { - "arch": { - "description": "The tags associated with the system", - "type": "string" - }, - "banner": { - "description": "The operating system name", - "type": "string" - }, - "kernel_version": { - "description": "The kernel version", - "type": "string" - }, - "libc": { - "description": "The operating system version", - "type": "string" - } - }, - "type": "object" - }, - "LinuxInfoResult": { - "description": "Info result for linux image", - "id": "LinuxInfoResult" - }, - "MalwareOverview": { - "description": "Overview for Malware analysis.", - "id": "MalwareOverview", - "properties": { - "count": { - "description": "Number of malware detected.", - "format": "int64", - "type": "integer" - } - }, - "type": "object" - }, - 
"MalwareParams": { - "id": "MalwareParams", - "properties": { - "sort-by": { - "$ref": "MalwareSortBy" - }, - "sort-ord": { - "$ref": "SortOrd" - } - }, - "type": "object" - }, - "MalwareSortBy": { - "enum": [ - "filename" - ], - "id": "MalwareSortBy", - "type": "string" - }, - "NewScanResponse": { - "description": "The response if a new scan is created.", - "id": "NewScanResponse", - "properties": { - "id": { - "$ref": "ScanId" - } - }, - "type": "object" - }, - "ObjectId": { - "description": "A wrapper struct `ObjectId` around a UUID.\n ID in the objects table.", - "format": "uuid", - "id": "ObjectId", - "type": "string" - }, - "PasswordHashOverview": { - "description": "Overview for Password Hash analysis.", - "id": "PasswordHashOverview", - "properties": { - "count": { - "description": "Number of passwords decoded.", - "format": "int64", - "type": "integer" - } - }, - "type": "object" - }, - "PasswordHashParams": { - "id": "PasswordHashParams", - "properties": { - "severity-filter": { - "items": { - "$ref": "PasswordHashSeverityFilter" - }, - "type": "array" - }, - "sort-by": { - "$ref": "PasswordHashSortBy" - }, - "sort-ord": { - "$ref": "SortOrd" - } - }, - "type": "object" - }, - "PasswordHashSeverityFilter": { - "enum": [ - "medium", - "high" - ], - "id": "PasswordHashSeverityFilter", - "type": "string" - }, - "PasswordHashSortBy": { - "enum": [ - "severity", - "username" - ], - "id": "PasswordHashSortBy", - "type": "string" - }, - "QueryName": { - "description": "Query parameter names for analysis filter types.\n\nNOTE: serialization values *MUST* match serialization structure\nof filter fields in QueryParameter types.", - "enum": [ - "license-filter" - ], - "id": "QueryName", - "type": "string" - }, - "Requirement": { - "description": "Represents a requirement in the report", - "id": "Requirement", - "properties": { - "advice": { - "description": "Human readable hint explaining how to pass this requirement.\n\nIn the case of \"with-suggestion\" 
status,\nthis will be the advice for the original status.", - "type": "string" - }, - "ai-suggestion": { - "$ref": "AiResult" - }, - "analyzer": { - "$ref": "AnalyzerResult" - }, - "description": { - "description": "Description of the requirement.", - "type": "string" - }, - "explanation": { - "description": "Human readable explanation of the status of this requirement.\n\nIn the case of \"with-suggestion\" status,\nthis will be the explanation for the original status.", - "type": "string" - }, - "id": { - "$ref": "RequirementId" - }, - "policy-ref": { - "description": "Reference to the policy associated with the requirement.", - "type": "string" - }, - "status": { - "$ref": "RequirementStatus" - }, - "user-overwrite": { - "$ref": "UserResult" - } - }, - "type": "object" - }, - "RequirementId": { - "description": "Id of Requirement\n\nThis id will be used to communicate between backend and fronted the semantic\nmeaning of requirement, as well as for overwriting specific requirement status by user.", - "enum": [ - "cve-exploits", - "password-strength", - "security-updates", - "update-notifications", - "access-control", - "unauthorized-access", - "data-encryption", - "data-integrity", - "data-collection", - "essential-availability", - "minimise-impact", - "attack-surfaces", - "attack-reduction", - "activity-monitoring", - "data-removal", - "vulns-documentation", - "vulns-security-updates", - "update-security-and-automation", - "security-testing-and-review", - "fixed-vulns-disclosure", - "vulns-coordinated-disclosure", - "vulns-reporting-contact", - "security-updates-dissemination" - ], - "id": "RequirementId", - "type": "string" - }, - "RequirementOverwrite": { - "description": "User action on a CRA requirement — either a manual overwrite or an AI suggestion response.", - "id": "RequirementOverwrite" - }, - "RequirementStatus": { - "description": "Overall status of the requirement\ncomputed by taking into account all user interactions.", - "enum": [ - "passed", - 
"failed", - "unknown", - "unknown-with-suggestion", - "not-applicable" - ], - "id": "RequirementStatus", - "type": "string" - }, - "RiskLevelCount": { - "description": "Count all different risk levels of the analysis.", - "id": "RiskLevelCount", - "properties": { - "critical": { - "format": "int64", - "type": "integer" - }, - "high": { - "format": "int64", - "type": "integer" - }, - "low": { - "format": "int64", - "type": "integer" - }, - "medium": { - "format": "int64", - "type": "integer" - }, - "none": { - "format": "int64", - "type": "integer" - }, - "unknown": { - "format": "int64", - "type": "integer" - } - }, - "type": "object" - }, - "SbomParams": { - "id": "SbomParams", - "properties": { - "license-filter": { - "items": { - "type": "string" - }, - "type": "array" - }, - "search": { - "type": "string" - }, - "sort-by": { - "$ref": "SbomSortBy" - }, - "sort-ord": { - "$ref": "SortOrd" - }, - "type-filter": { - "items": { - "$ref": "ComponentType" - }, - "type": "array" - } - }, - "type": "object" - }, - "SbomSortBy": { - "enum": [ - "name" - ], - "id": "SbomSortBy", - "type": "string" - }, - "Scan": { - "description": "Represents a scan that aggregates multiple analyses executed on a particular image.", - "id": "Scan", - "properties": { - "analysis": { - "description": "All analyses processed as part of this scan.", - "items": { - "$ref": "AnalysisState" - }, - "type": "array" - }, - "created": { - "description": "The date and time when the scan was initiated.", - "format": "date-time", - "type": "string" - }, - "id": { - "$ref": "ScanId" - }, - "image": { - "$ref": "Image" - }, - "image_type": { - "$ref": "ImageType" - }, - "info": { - "$ref": "Info" - }, - "score": { - "$ref": "ScanScore" - } - }, - "type": "object" - }, - "ScanId": { - "description": "A wrapper struct `ScanId` around a UUID.\n ID in the scans table.", - "format": "uuid", - "id": "ScanId", - "type": "string" - }, - "ScanOverview": { - "description": "Response object for 
`/scans/:id/overview` endpoint.\n\nSee [module's](super) documentation for more information\nabout schema and computation logic.", - "id": "ScanOverview", - "properties": { - "capabilities": { - "$ref": "CapabilitiesOverview" - }, - "crypto": { - "$ref": "CryptoOverview" - }, - "cve": { - "$ref": "CveOverview" - }, - "hardening": { - "$ref": "HardeningOverview" - }, - "info": { - "$ref": "InfoOverview" - }, - "kernel": { - "$ref": "KernelOverview" - }, - "malware": { - "$ref": "MalwareOverview" - }, - "password-hash": { - "$ref": "PasswordHashOverview" - }, - "software-bom": { - "$ref": "SoftwareBOMOverview" - }, - "stack-overflow": { - "$ref": "StackOverflowOverview" - }, - "symbols": { - "$ref": "SymbolsOverview" - }, - "tasks": { - "$ref": "TasksOverview" - } - }, - "type": "object" - }, - "ScanScore": { - "description": "The calculate score with an weighted algorithm over all analysis.", - "id": "ScanScore", - "properties": { - "score": { - "$ref": "Score" - }, - "scores": { - "description": "Individual analyses scores.", - "items": { - "$ref": "AnalysisScore" - }, - "type": "array" - } - }, - "type": "object" - }, - "ScanStatus": { - "description": "The status of a [`Scan`](analyzer_db::repository::scan::Scan)\nand all the [`Analysis`](analyzer_db::repository::analysis::Analysis).", - "id": "ScanStatus", - "properties": { - "id": { - "$ref": "ScanId" - }, - "status": { - "$ref": "AnalysisStatus" - } - }, - "type": "object" - }, - "ScanType": { - "description": "Represents a unified type for analyses across all supported images.", - "id": "ScanType" - }, - "Score": { - "description": "Represents a security impact score, ranging from 0 to 100.\n\nA higher value indicates a greater security impact.", - "format": "int32", - "id": "Score", - "type": "integer" - }, - "Section": { - "description": "Represents a group of requirements, grouped by [SubSection]s.", - "id": "Section", - "properties": { - "label": { - "description": "Name of the requirement", - "type": 
"string" - }, - "policy-ref": { - "description": "Reference to the policy associated with the requirement", - "type": "string" - }, - "sub-sections": { - "description": "List of sub-requirements or checks associated with this requirement", - "items": { - "$ref": "SubSection" - }, - "type": "array" - } - }, - "type": "object" - }, - "SeverityFilter": { - "enum": [ - "none", - "low", - "medium", - "high", - "critical", - "unknown" - ], - "id": "SeverityFilter", - "type": "string" - }, - "SoftwareBOMOverview": { - "description": "Overview for Software BOM analysis.", - "id": "SoftwareBOMOverview", - "properties": { - "count": { - "description": "Total number of software BOM entries.", - "format": "int64", - "type": "integer" - }, - "licenses": { - "description": "License type and their number of occurrences.", - "type": "object" - } - }, - "type": "object" - }, - "SortOrd": { - "enum": [ - "asc", - "desc" - ], - "id": "SortOrd", - "type": "string" - }, - "StackOverflowOverview": { - "description": "Overview for Stack Overflow analysis.", - "id": "StackOverflowOverview", - "properties": { - "method": { - "description": "Name of the protection method used,\nor `None` if stack overflow protection is not enabled.", - "type": "string" - } - }, - "type": "object" - }, - "Status": { - "description": "Status of the AI suggestions computation.", - "enum": [ - "in-progress", - "finished" - ], - "id": "Status", - "type": "string" - }, - "StatusFilter": { - "enum": [ - "enabled", - "disabled" - ], - "id": "StatusFilter", - "type": "string" - }, - "SubSection": { - "description": "Represents a group of requirements", - "id": "SubSection", - "properties": { - "label": { - "description": "Name of the requirement", - "type": "string" - }, - "requirements": { - "description": "List of sub-requirements or checks associated with this requirement", - "items": { - "$ref": "Requirement" - }, - "type": "array" - } - }, - "type": "object" - }, - "SuggestionResponse": { - "description": "User 
response to AI suggestion.", - "enum": [ - "accepted", - "rejected" - ], - "id": "SuggestionResponse", - "type": "string" - }, - "SymbolsOverview": { - "description": "Overview for Symbol analysis.", - "id": "SymbolsOverview", - "properties": { - "count": { - "description": "Number of analyzed symbols.", - "format": "int64", - "type": "integer" - } - }, - "type": "object" - }, - "TasksOverview": { - "description": "Overview for Task analysis.", - "id": "TasksOverview", - "properties": { - "count": { - "description": "Number of analysed tasks.", - "format": "int64", - "type": "integer" - } - }, - "type": "object" - }, - "TypeFilter": { - "enum": [ - "sect", - "func", - "obj", - "file", - "notype" - ], - "id": "TypeFilter", - "type": "string" - }, - "UpdateObject": { - "description": "The request to update fields on an [`Object`].", - "id": "UpdateObject", - "properties": { - "description": { - "description": "Description of the object.", - "type": "string" - }, - "favorite": { - "description": "Sets if the object is a favorite or not.", - "type": "boolean" - }, - "name": { - "description": "Name of the object.", - "type": "string" - }, - "tags": { - "description": "The tags associated with the object.", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "UserId": { - "description": "A wrapper struct `UserId` around a UUID.\n ID in the users table.", - "format": "uuid", - "id": "UserId", - "type": "string" - }, - "UserResult": { - "id": "UserResult", - "properties": { - "status": { - "$ref": "UserStatus" - } - }, - "type": "object" - }, - "UserStatus": { - "description": "Represents the status of a requirement overwritten by the user", - "enum": [ - "passed", - "failed" - ], - "id": "UserStatus", - "type": "string" - }, - "UserUploadedDocument": { - "description": "Description of the user provided file\nused by ai to give its suggestion.", - "id": "UserUploadedDocument", - "properties": { - "filename": { - "description": "Name 
of the user uploaded file.", - "type": "string" - } - }, - "type": "object" - } - }, - "resources": { - "api": { - "resources": { - "health": { - "methods": { - "list": { - "id": "analyzer-api-routes.api.health.list", - "httpMethod": "GET", - "path": "api/health", - "description": "Returns if the service is in an healthy state.", - "response": { - "$ref": "HealthStatus" - } - } - } - }, - "objects": { - "methods": { - "create": { - "id": "analyzer-api-routes.api.objects.create", - "httpMethod": "POST", - "path": "api/objects", - "description": "Create new object", - "request": { - "$ref": "CreateObject" - } - }, - "delete": { - "id": "analyzer-api-routes.api.objects.delete", - "httpMethod": "DELETE", - "path": "api/objects/{id}", - "description": "Deletes a object and all related scans.", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Unique identifier of the object to delete" - } - }, - "parameterOrder": [ - "id" - ] - }, - "get": { - "id": "analyzer-api-routes.api.objects.get", - "httpMethod": "GET", - "path": "api/objects/{id}", - "description": "Retrieve an object by its ID.", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Object ID" - } - }, - "parameterOrder": [ - "id" - ] - }, - "list": { - "id": "analyzer-api-routes.api.objects.list", - "httpMethod": "GET", - "path": "api/objects", - "description": "Retrieve a list of all objects of the current user.", - "parameters": { - "end_timestamp": { - "type": "string", - "required": false, - "location": "query", - "description": "End timestamp for pagination.", - "format": "datetime" - }, - "id": { - "type": "string", - "required": false, - "location": "query", - "description": "Pagination cursor (UUID).", - "format": "uuid" - }, - "limit": { - "type": "integer", - "required": false, - "location": "query", - "description": "Maximum number of items per page.", - "format": "int32" - }, - 
"start_timestamp": { - "type": "string", - "required": false, - "location": "query", - "description": "Start timestamp for pagination.", - "format": "datetime" - } - } - }, - "update": { - "id": "analyzer-api-routes.api.objects.update", - "httpMethod": "PUT", - "path": "api/objects/{id}", - "description": "Update an object", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Object ID" - } - }, - "parameterOrder": [ - "id" - ], - "request": { - "$ref": "UpdateObject" - } - } - }, - "resources": { - "scans": { - "methods": { - "list": { - "id": "analyzer-api-routes.api.objects.scans.list", - "httpMethod": "GET", - "path": "api/objects/{id}/scans", - "description": "Those scans could be", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Object ID" - } - }, - "parameterOrder": [ - "id" - ] - } - } - } - } - }, - "scans": { - "methods": { - "create": { - "id": "analyzer-api-routes.api.scans.create", - "httpMethod": "POST", - "path": "api/scans", - "description": "Schedule a new scan.", - "response": { - "$ref": "NewScanResponse" - } - }, - "delete": { - "id": "analyzer-api-routes.api.scans.delete", - "httpMethod": "DELETE", - "path": "api/scans/{id}", - "description": "Delete a scan.", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Scan ID" - } - }, - "parameterOrder": [ - "id" - ] - }, - "get": { - "id": "analyzer-api-routes.api.scans.get", - "httpMethod": "GET", - "path": "api/scans/{id}", - "description": "Returns a scan.", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Scan ID" - } - }, - "parameterOrder": [ - "id" - ], - "response": { - "$ref": "Scan" - } - }, - "list": { - "id": "analyzer-api-routes.api.scans.list", - "httpMethod": "GET", - "path": "api/scans", - "description": "Retrieve a list of scans." 
- } - }, - "resources": { - "cancel": { - "methods": { - "create": { - "id": "analyzer-api-routes.api.scans.cancel.create", - "httpMethod": "POST", - "path": "api/scans/{id}/cancel", - "description": "This can be used to cancel an already pending or running scan.", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Scan ID" - } - }, - "parameterOrder": [ - "id" - ] - } - } - }, - "compliance-check": { - "resources": { - "cyber-resilience-act": { - "methods": { - "list": { - "id": "analyzer-api-routes.api.scans.compliance-check.cyber-resilience-act.list", - "httpMethod": "GET", - "path": "api/scans/{id}/compliance-check/cyber-resilience-act", - "description": "Computes compliance with Cyber Resilience Act", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Scan ID", - "format": "uuid" - } - }, - "parameterOrder": [ - "id" - ], - "response": { - "$ref": "CyberResilienceActReport" - } - } - }, - "resources": { - "ai-suggestion": { - "resources": { - "begin": { - "methods": { - "create": { - "id": "analyzer-api-routes.api.scans.compliance-check.cyber-resilience-act.ai-suggestion.begin.create", - "httpMethod": "POST", - "path": "api/scans/{id}/compliance-check/cyber-resilience-act/ai-suggestion/begin", - "description": "Triggers CRA AI suggestion using user-provided documents.", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Scan ID", - "format": "uuid" - } - }, - "parameterOrder": [ - "id" - ] - } - } - }, - "status": { - "methods": { - "list": { - "id": "analyzer-api-routes.api.scans.compliance-check.cyber-resilience-act.ai-suggestion.status.list", - "httpMethod": "GET", - "path": "api/scans/{id}/compliance-check/cyber-resilience-act/ai-suggestion/status", - "description": "Returns status of the CRA AI suggestion.", - "parameters": { - "id": { - "type": "string", - "required": true, - 
"location": "path", - "description": "Scan ID", - "format": "uuid" - } - }, - "parameterOrder": [ - "id" - ], - "response": { - "$ref": "AiSuggestionStatus" - } - } - } - } - } - }, - "overwrite": { - "methods": { - "overwrite_compliance_check_requirement": { - "id": "analyzer-api-routes.api.scans.compliance-check.cyber-resilience-act.overwrite.overwrite_compliance_check_requirement", - "httpMethod": "PUT", - "path": "api/scans/{id}/compliance-check/cyber-resilience-act/overwrite", - "description": "Overwrites compliance check requirement", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Scan ID", - "format": "uuid" - } - }, - "parameterOrder": [ - "id" - ], - "request": { - "$ref": "RequirementOverwrite" - } - } - } - }, - "report": { - "methods": { - "list": { - "id": "analyzer-api-routes.api.scans.compliance-check.cyber-resilience-act.report.list", - "httpMethod": "GET", - "path": "api/scans/{id}/compliance-check/cyber-resilience-act/report", - "description": "Downloads Cyber Resilience Act compliance report as PDF", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Scan ID", - "format": "uuid" - } - }, - "parameterOrder": [ - "id" - ] - } - } - } - } - } - } - }, - "documents": { - "methods": { - "create": { - "id": "analyzer-api-routes.api.scans.documents.create", - "httpMethod": "POST", - "path": "api/scans/{id}/documents", - "description": "Upload a document for a scan.", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Scan ID" - } - }, - "parameterOrder": [ - "id" - ], - "response": { - "$ref": "DocumentUploadResponse" - } - }, - "delete": { - "id": "analyzer-api-routes.api.scans.documents.delete", - "httpMethod": "DELETE", - "path": "api/scans/{id}/documents/{file_name}", - "description": "Delete a single document for a scan.", - "parameters": { - "file_name": { - "type": "string", 
- "required": true, - "location": "path", - "description": "Document file name" - }, - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Scan ID" - } - }, - "parameterOrder": [ - "id", - "file_name" - ] - }, - "delete_documents": { - "id": "analyzer-api-routes.api.scans.documents.delete_documents", - "httpMethod": "DELETE", - "path": "api/scans/{id}/documents", - "description": "Delete all documents for a scan.", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Scan ID" - } - }, - "parameterOrder": [ - "id" - ] - }, - "list": { - "id": "analyzer-api-routes.api.scans.documents.list", - "httpMethod": "GET", - "path": "api/scans/{id}/documents", - "description": "List documents for a scan.", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Scan ID" - } - }, - "parameterOrder": [ - "id" - ], - "response": { - "$ref": "DocumentListResponse" - } - } - } - }, - "overview": { - "methods": { - "get": { - "id": "analyzer-api-routes.api.scans.overview.get", - "httpMethod": "GET", - "path": "api/scans/{scan_id}/overview/{analysis_id}", - "description": "Returns an overview of one analysis.", - "parameters": { - "analysis_id": { - "type": "string", - "required": true, - "location": "path", - "description": "Analysis ID" - }, - "scan_id": { - "type": "string", - "required": true, - "location": "path", - "description": "Scan ID" - } - }, - "parameterOrder": [ - "scan_id", - "analysis_id" - ], - "response": { - "$ref": "AnalysisOverview" - } - }, - "list": { - "id": "analyzer-api-routes.api.scans.overview.list", - "httpMethod": "GET", - "path": "api/scans/{id}/overview", - "description": "Returns an aggregated overview of all analysis executed for one scan.", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Scan ID" - } - }, - "parameterOrder": [ - "id" - ], - 
"response": { - "$ref": "ScanOverview" - } - } - } - }, - "report": { - "methods": { - "list": { - "id": "analyzer-api-routes.api.scans.report.list", - "httpMethod": "GET", - "path": "api/scans/{id}/report", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Scan ID", - "format": "uuid" - } - }, - "parameterOrder": [ - "id" - ] - } - } - }, - "results": { - "methods": { - "get": { - "id": "analyzer-api-routes.api.scans.results.get", - "httpMethod": "GET", - "path": "api/scans/{scan_id}/results/{analysis_id}", - "description": "Retrieve the results of one specific analysis of a scan.", - "parameters": { - "analysis_id": { - "type": "string", - "required": true, - "location": "path", - "description": "Analysis ID" - }, - "page": { - "type": "integer", - "required": false, - "location": "query", - "description": "Page number (must be > 0). If provided, `per-page` must also be provided.", - "format": "int32" - }, - "per-page": { - "type": "integer", - "required": false, - "location": "query", - "description": "Items per page (must be > 0). If provided, `page` must also be provided.", - "format": "int32" - }, - "query": { - "type": "string", - "required": true, - "location": "query", - "description": "Query parameters depend on the analysis type. Supported shapes: IDF task, other analysis types." 
- }, - "scan_id": { - "type": "string", - "required": true, - "location": "path", - "description": "Scan ID" - } - }, - "parameterOrder": [ - "scan_id", - "analysis_id" - ], - "response": { - "$ref": "AnalysisResultDTO" - } - } - } - }, - "sbom": { - "methods": { - "list": { - "id": "analyzer-api-routes.api.scans.sbom.list", - "httpMethod": "GET", - "path": "api/scans/{id}/sbom", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Scan ID", - "format": "uuid" - } - }, - "parameterOrder": [ - "id" - ] - } - } - }, - "score": { - "methods": { - "list": { - "id": "analyzer-api-routes.api.scans.score.list", - "httpMethod": "GET", - "path": "api/scans/{id}/score", - "description": "Returns a security score of all successful finished analyses with their individual scores included.", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Scan ID" - } - }, - "parameterOrder": [ - "id" - ], - "response": { - "$ref": "ScanScore" - } - } - } - }, - "status": { - "methods": { - "list": { - "id": "analyzer-api-routes.api.scans.status.list", - "httpMethod": "GET", - "path": "api/scans/{id}/status", - "description": "Returns the status of a scan.", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Scan ID" - } - }, - "parameterOrder": [ - "id" - ], - "response": { - "$ref": "ScanStatus" - } - } - } - }, - "types": { - "methods": { - "list": { - "id": "analyzer-api-routes.api.scans.types.list", - "httpMethod": "GET", - "path": "api/scans/types", - "description": "Returns a list of all available analysis types for each different image." 
- } - } - } - } - } - } - } - } -} \ No newline at end of file +{"kind":"discovery#restDescription","discoveryVersion":"v1","id":"analyzer-api-routes:0.5.0","name":"analyzer-api-routes","version":"0.5.0","title":"Analyzer API routes","description":"","protocol":"rest","rootUrl":"","servicePath":"","schemas":{"AiResult":{"id":"AiResult","properties":{"reasoning":{"description":"AI reasoning, which led to the current status","type":"string"},"sources":{"description":"List of documents used by AI to produce current status.","items":{"$ref":"UserUploadedDocument"},"type":"array"},"status":{"$ref":"AiStatus"},"user-action":{"$ref":"SuggestionResponse"}},"type":"object"},"AiStatus":{"description":"Represents the status of a requirement determined by ai","enum":["passed","failed","unknown"],"id":"AiStatus","type":"string"},"AiSuggestionStatus":{"description":"Status of the AI suggestions computation.","id":"AiSuggestionStatus","properties":{"status":{"$ref":"Status"}},"type":"object"},"AnalysisFilter":{"id":"AnalysisFilter","properties":{"query-name":{"$ref":"QueryName"},"values":{"description":"Available filter values with their count.","items":{"$ref":"FilterValue"},"type":"array"}},"type":"object"},"AnalysisFindings":{"description":"Wrapper type similar to AnalysisResult, but it contains only `findings`\nportion of analysis.","id":"AnalysisFindings"},"AnalysisId":{"description":"A wrapper struct `AnalysisId` around a UUID.\n ID in the analysis table.","format":"uuid","id":"AnalysisId","type":"string"},"AnalysisInfo":{"description":"Helper struct to define if an analysis should be by default enabled","id":"AnalysisInfo","properties":{"default":{"type":"boolean"},"type":{"type":"string"}},"type":"object"},"AnalysisOverview":{"description":"Like [`ScanOverview`] but for single analysis.","id":"AnalysisOverview"},"AnalysisQueryUnion":{"description":"Union of all available query parameters for 
analyses.","id":"AnalysisQueryUnion"},"AnalysisResultDTO":{"description":"AnalysisResult but with count of all findings,\nbefore pagination was applied.","id":"AnalysisResultDTO","properties":{"filters":{"description":"Filters that can be used in this analysis.","type":"object"},"findings":{"$ref":"AnalysisFindings"},"total-findings":{"description":"Total count of findings _after_ filtering, but _before_ pagination.","format":"int64","type":"integer"}},"type":"object"},"AnalysisScore":{"description":"The score of an analysis.","id":"AnalysisScore","properties":{"id":{"$ref":"AnalysisId"},"score":{"$ref":"Score"},"type":{"$ref":"AnalysisType"}},"type":"object"},"AnalysisState":{"description":"An analysis that runs for one particular system image.","id":"AnalysisState","properties":{"id":{"$ref":"AnalysisId"},"status":{"$ref":"AnalysisStatus"},"type":{"$ref":"ScanType"}},"type":"object"},"AnalysisStatus":{"description":"Represents the current execution status of an analysis task.","enum":["success","pending","in-progress","canceled","error"],"id":"AnalysisStatus","type":"string"},"AnalysisType":{"description":"Type of the analysis","enum":["info","kernel","cve","password-hash","hardening","malware","software-bom","crypto","capabilities","symbols","tasks","stack-overflow"],"id":"AnalysisType","type":"string"},"AnalyzerResult":{"id":"AnalyzerResult","properties":{"status":{"$ref":"AnalyzerStatus"}},"type":"object"},"AnalyzerStatus":{"description":"Represents the status of a requirement determined by analyzer","enum":["passed","failed","unknown","not-applicable"],"id":"AnalyzerStatus","type":"string"},"ApiScanType":{"description":"List of available analysis types per image type.\n\nThis includes the information if an analysis type should be scheduled by default or not.\n\n# Note\n\nThis is used by the frontend to determine which analysis has to be scheduled implicitly\nand which types are 
optional.","id":"ApiScanType"},"BindFilter":{"enum":["local","global","weak"],"id":"BindFilter","type":"string"},"CapabilitiesOverview":{"description":"Overview for Capability analysis.","id":"CapabilitiesOverview","properties":{"capabilities":{"description":"Capabilities found and their number of occurrences.","type":"object"},"counts":{"$ref":"RiskLevelCount"},"executable_count":{"description":"Total number of executables.","format":"int64","type":"integer"}},"type":"object"},"CapabilityParams":{"id":"CapabilityParams","properties":{"search":{"type":"string"},"severity-filter":{"items":{"$ref":"SeverityFilter"},"type":"array"},"sort-by":{"$ref":"CapabilitySortBy"},"sort-ord":{"$ref":"SortOrd"}},"type":"object"},"CapabilitySortBy":{"enum":["severity"],"id":"CapabilitySortBy","type":"string"},"Checks":{"description":"Represents the checks performed in the report","id":"Checks","properties":{"failed":{"description":"Number of checks that failed (determined either by analyzer or overwritten by the user)","format":"int32","type":"integer"},"not-applicable":{"description":"Number of not applicable requirements","format":"int32","type":"integer"},"passed":{"description":"Number of checks that passed (determined either by analyzer or overwritten by the user)","format":"int32","type":"integer"},"suggestion-available":{"description":"Number of checks for which AI suggestion is available.\n\nIt does not include user accepted or rejected suggestions.","format":"int32","type":"integer"},"total":{"description":"Total number of checks performed","format":"int32","type":"integer"},"unknown":{"description":"Number of checks that analyzer was unable to determine\n(or ai didn't give conclusive suggestion).\n\nNote that this will also include those requirements,\nthat have ai suggestion available, but user has not approved or rejected it 
yet.","format":"int32","type":"integer"}},"type":"object"},"ComponentType":{"enum":["application","framework","library","container","operating-system","device","firmware","file"],"id":"ComponentType","type":"string"},"CreateObject":{"description":"The request to create a new object.","id":"CreateObject","properties":{"description":{"description":"Description of the object.","type":"string"},"name":{"description":"Name of the object.","type":"string"},"tags":{"description":"Tags associated with the object.","items":{"type":"string"},"type":"array"}},"type":"object"},"CryptoOverview":{"description":"Overview for Crypto analysis.","id":"CryptoOverview","properties":{"certificates":{"description":"Number of certificates found.","format":"int64","type":"integer"},"private_keys":{"description":"Number of private keys found.","format":"int64","type":"integer"},"public_keys":{"description":"Number of public keys found.","format":"int64","type":"integer"}},"type":"object"},"CryptoParams":{"id":"CryptoParams","properties":{"search":{"type":"string"},"sort-by":{"$ref":"CryptoSortBy"},"sort-ord":{"$ref":"SortOrd"},"type-filter":{"items":{"$ref":"CryptoTypeFilter"},"type":"array"}},"type":"object"},"CryptoSortBy":{"enum":["type","key-size","filename","path","issuer"],"id":"CryptoSortBy","type":"string"},"CryptoTypeFilter":{"enum":["certificate","private-key","public-key"],"id":"CryptoTypeFilter","type":"string"},"CveOverview":{"description":"Overview for Cve analysis.","id":"CveOverview","properties":{"counts":{"$ref":"CveSeverityCount"},"products":{"description":"Cve counts for each \"product\" (binary, library, etc.).","type":"object"},"total":{"description":"Sum of all 
`counts`.","format":"int64","type":"integer"}},"type":"object"},"CveParams":{"id":"CveParams","properties":{"patch-filter":{"items":{"$ref":"CvePatchFilter"},"type":"array"},"search":{"type":"string"},"severity-filter":{"items":{"$ref":"CveSeverityFilter"},"type":"array"},"sort-by":{"$ref":"CveSortBy"},"sort-ord":{"$ref":"SortOrd"}},"type":"object"},"CvePatchFilter":{"enum":["available","unavailable"],"id":"CvePatchFilter","type":"string"},"CveSeverityCount":{"description":"Maps CVE severity to its count","id":"CveSeverityCount","properties":{"critical":{"format":"int64","type":"integer"},"high":{"format":"int64","type":"integer"},"low":{"format":"int64","type":"integer"},"medium":{"format":"int64","type":"integer"},"unknown":{"format":"int64","type":"integer"}},"type":"object"},"CveSeverityFilter":{"enum":["low","medium","high","critical"],"id":"CveSeverityFilter","type":"string"},"CveSortBy":{"enum":["severity"],"id":"CveSortBy","type":"string"},"CyberResilienceActReport":{"description":"Represents a Cyber Resilience Act report","id":"CyberResilienceActReport","properties":{"checks":{"$ref":"Checks"},"created-at":{"description":"Date and time when the report was created.","format":"date-time","type":"string"},"name":{"description":"Name of the report.","type":"string"},"sections":{"description":"List of categories in the report.","items":{"$ref":"Section"},"type":"array"},"updated-at":{"description":"Date and time of last report update.\n\nIf no update has happened yet, for example after report was generated\nand before any user overwrite, this will be `null`.","format":"date-time","type":"string"}},"type":"object"},"DockerAnalysis":{"description":"Represents different types of analyses for Docker containers.","enum":["info","cve","password-hash","crypto","software-bom","malware","hardening","capabilities"],"id":"DockerAnalysis","type":"string"},"DockerInfo":{"description":"Container metadata information\n\nRepresents various metadata attributes of a container 
image","id":"DockerInfo","properties":{"arch":{"description":"List of supported CPU architectures for the container","items":{"type":"string"},"type":"array"},"ctime":{"description":"List of creation timestamps for container layers","items":{"type":"string"},"type":"array"},"env":{"description":"List of environment variables defined in the container","items":{"type":"string"},"type":"array"},"history":{"description":"List of commands used to build the container layers","items":{"$ref":"History"},"type":"array"},"os":{"description":"List of supported operating systems for the container","items":{"type":"string"},"type":"array"},"os_name":{"description":"Name of the base operating system used in the container","type":"string"},"os_version":{"description":"Version of the base operating system used in the container","type":"string"},"tags":{"description":"List of container image tags associated with the image","items":{"type":"string"},"type":"array"}},"type":"object"},"DockerInfoResult":{"description":"Info result for docker image","id":"DockerInfoResult"},"DocumentListItem":{"description":"A single document entry in a listing.","id":"DocumentListItem","properties":{"file-name":{"description":"Original file name, serves as the unique key within a scan's document storage","type":"string"}},"type":"object"},"DocumentListResponse":{"description":"A list of documents associated with a scan.","id":"DocumentListResponse","properties":{"documents":{"items":{"$ref":"DocumentListItem"},"type":"array"}},"type":"object"},"DocumentUploadResponse":{"description":"The response after successfully uploading a document.","id":"DocumentUploadResponse","properties":{"file-name":{"description":"Original file name, serves as the unique key within a scan's document 
storage","type":"string"}},"type":"object"},"FeaturesFilter":{"enum":["seccomp","seccomp-filter","security-network","stack-protector","fortify-source","vmap-kernel-stack","usercopy","heap-freelist-obfuscation","executable-memory-protection","kaslr","apparmor","selinux","smack","tomoyo","yama"],"id":"FeaturesFilter","type":"string"},"FilterValue":{"id":"FilterValue","properties":{"count":{"description":"Count of findings matching this value for current filter options.","format":"int64","type":"integer"},"value":{"description":"Filter value that can be passed in query parameters.","type":"string"}},"type":"object"},"HardeningOverview":{"description":"Overview for Hardening analysis.","id":"HardeningOverview","properties":{"counts":{"$ref":"HardeningSeverityCount"},"total":{"description":"Sum of all `counts`.","format":"int64","type":"integer"}},"type":"object"},"HardeningParams":{"id":"HardeningParams","properties":{"search":{"type":"string"},"severity-filter":{"items":{"$ref":"HardeningSeverityFilter"},"type":"array"},"sort-by":{"$ref":"HardeningSortBy"},"sort-ord":{"$ref":"SortOrd"}},"type":"object"},"HardeningSeverityCount":{"description":"Maps Hardening severity to its count","id":"HardeningSeverityCount","properties":{"high":{"format":"int64","type":"integer"},"low":{"format":"int64","type":"integer"},"medium":{"format":"int64","type":"integer"}},"type":"object"},"HardeningSeverityFilter":{"enum":["low","medium","high"],"id":"HardeningSeverityFilter","type":"string"},"HardeningSortBy":{"enum":["severity","filename","canary","nx","pie","relro","fortify"],"id":"HardeningSortBy","type":"string"},"HealthStatus":{"description":"Health status of an application.\n\nIt contains an overall `healthy` field but can also provide\nthe status of individual components or an error message.\nIf the status is not healthy a Http status code of 500 will be 
returned.","id":"HealthStatus","properties":{"healthy":{"type":"boolean"},"message":{"type":"string"}},"type":"object"},"History":{"id":"History","properties":{"created":{"format":"date-time","type":"string"},"created_by":{"type":"string"},"empty_layer":{"type":"boolean"}},"type":"object"},"IdfAnalysis":{"description":"Represents analyses specific to IDF (IoT Device Framework) targets.","enum":["info","cve","software-bom","symbols","tasks","stack-overflow"],"id":"IdfAnalysis","type":"string"},"IdfInfo":{"description":"IdfInfo analysis entry for idf image","id":"IdfInfo","properties":{"arch":{"description":"Architecture type","type":"string"},"compiler":{"description":"Compiler name and version used to create this image","type":"string"},"freertos":{"description":"freertos version","type":"string"},"idf":{"description":"idf version","type":"string"}},"type":"object"},"IdfInfoResult":{"description":"Info result for idf image","id":"IdfInfoResult"},"IdfSymbolParams":{"id":"IdfSymbolParams","properties":{"bind-filter":{"items":{"$ref":"BindFilter"},"type":"array"},"search":{"type":"string"},"sort-by":{"$ref":"IdfSymbolSortBy"},"sort-ord":{"$ref":"SortOrd"},"type-filter":{"items":{"$ref":"TypeFilter"},"type":"array"}},"type":"object"},"IdfSymbolSortBy":{"enum":["name"],"id":"IdfSymbolSortBy","type":"string"},"IdfTaskParams":{"id":"IdfTaskParams","properties":{"search":{"type":"string"},"sort-by":{"$ref":"IdfTaskSortBy"},"sort-ord":{"$ref":"SortOrd"}},"type":"object"},"IdfTaskSortBy":{"enum":["function","name"],"id":"IdfTaskSortBy","type":"string"},"Image":{"description":"A image on which a scan is executed","id":"Image","properties":{"file_name":{"description":"The original name of the file as provided when the image was uploaded.\nThis is typically used for display or reference purposes and may not be unique.","type":"string"},"id":{"$ref":"ImageId"}},"type":"object"},"ImageId":{"description":"A wrapper struct `ImageId` around a UUID.\n ID in the images 
table.","format":"uuid","id":"ImageId","type":"string"},"ImageType":{"description":"Type of the image used in scan","enum":["linux","docker","idf"],"id":"ImageType","type":"string"},"Info":{"id":"Info"},"InfoOverview":{"id":"InfoOverview"},"KernelOverview":{"description":"Overview for Kernel analysis.","id":"KernelOverview","properties":{"count":{"description":"Number of kernel security features enabled.","format":"int64","type":"integer"}},"type":"object"},"KernelParams":{"id":"KernelParams","properties":{"features-filter":{"items":{"$ref":"FeaturesFilter"},"type":"array"},"sort-by":{"$ref":"KernelSortBy"},"sort-ord":{"$ref":"SortOrd"},"status-filter":{"items":{"$ref":"StatusFilter"},"type":"array"}},"type":"object"},"KernelSortBy":{"enum":["features","status"],"id":"KernelSortBy","type":"string"},"LinuxAnalysis":{"description":"Represents different types of analyses that can be performed on a Linux system.","enum":["info","kernel","cve","password-hash","crypto","software-bom","malware","hardening","capabilities"],"id":"LinuxAnalysis","type":"string"},"LinuxInfo":{"description":"Represents the information about the system","id":"LinuxInfo","properties":{"arch":{"description":"The tags associated with the system","type":"string"},"banner":{"description":"The operating system name","type":"string"},"kernel_version":{"description":"The kernel version","type":"string"},"libc":{"description":"The operating system version","type":"string"}},"type":"object"},"LinuxInfoResult":{"description":"Info result for linux image","id":"LinuxInfoResult"},"MalwareOverview":{"description":"Overview for Malware analysis.","id":"MalwareOverview","properties":{"count":{"description":"Number of malware 
detected.","format":"int64","type":"integer"}},"type":"object"},"MalwareParams":{"id":"MalwareParams","properties":{"sort-by":{"$ref":"MalwareSortBy"},"sort-ord":{"$ref":"SortOrd"}},"type":"object"},"MalwareSortBy":{"enum":["filename"],"id":"MalwareSortBy","type":"string"},"NewScanResponse":{"description":"The response if a new scan is created.","id":"NewScanResponse","properties":{"id":{"$ref":"ScanId"}},"type":"object"},"ObjectId":{"description":"A wrapper struct `ObjectId` around a UUID.\n ID in the objects table.","format":"uuid","id":"ObjectId","type":"string"},"PasswordHashOverview":{"description":"Overview for Password Hash analysis.","id":"PasswordHashOverview","properties":{"count":{"description":"Number of passwords decoded.","format":"int64","type":"integer"}},"type":"object"},"PasswordHashParams":{"id":"PasswordHashParams","properties":{"severity-filter":{"items":{"$ref":"PasswordHashSeverityFilter"},"type":"array"},"sort-by":{"$ref":"PasswordHashSortBy"},"sort-ord":{"$ref":"SortOrd"}},"type":"object"},"PasswordHashSeverityFilter":{"enum":["medium","high"],"id":"PasswordHashSeverityFilter","type":"string"},"PasswordHashSortBy":{"enum":["severity","username"],"id":"PasswordHashSortBy","type":"string"},"QueryName":{"description":"Query parameter names for analysis filter types.\n\nNOTE: serialization values *MUST* match serialization structure\nof filter fields in QueryParameter types.","enum":["license-filter"],"id":"QueryName","type":"string"},"Requirement":{"description":"Represents a requirement in the report","id":"Requirement","properties":{"advice":{"description":"Human readable hint explaining how to pass this requirement.\n\nIn the case of \"with-suggestion\" status,\nthis will be the advice for the original status.","type":"string"},"ai-suggestion":{"$ref":"AiResult"},"analyzer":{"$ref":"AnalyzerResult"},"description":{"description":"Description of the requirement.","type":"string"},"explanation":{"description":"Human readable explanation of the 
status of this requirement.\n\nIn the case of \"with-suggestion\" status,\nthis will be the explanation for the original status.","type":"string"},"id":{"$ref":"RequirementId"},"policy-ref":{"description":"Reference to the policy associated with the requirement.","type":"string"},"status":{"$ref":"RequirementStatus"},"user-overwrite":{"$ref":"UserResult"}},"type":"object"},"RequirementId":{"description":"Id of Requirement\n\nThis id will be used to communicate between backend and frontend the semantic\nmeaning of requirement, as well as for overwriting specific requirement status by user.","enum":["cve-exploits","password-strength","security-updates","update-notifications","access-control","unauthorized-access","data-encryption","data-integrity","data-collection","essential-availability","minimise-impact","attack-surfaces","attack-reduction","activity-monitoring","data-removal","vulns-documentation","vulns-security-updates","update-security-and-automation","security-testing-and-review","fixed-vulns-disclosure","vulns-coordinated-disclosure","vulns-reporting-contact","security-updates-dissemination"],"id":"RequirementId","type":"string"},"RequirementOverwrite":{"description":"User action on a CRA requirement — either a manual overwrite or an AI suggestion response.","id":"RequirementOverwrite"},"RequirementStatus":{"description":"Overall status of the requirement\ncomputed by taking into account all user interactions.","enum":["passed","failed","unknown","unknown-with-suggestion","not-applicable"],"id":"RequirementStatus","type":"string"},"RiskLevelCount":{"description":"Count all different risk levels of the 
analysis.","id":"RiskLevelCount","properties":{"critical":{"format":"int64","type":"integer"},"high":{"format":"int64","type":"integer"},"low":{"format":"int64","type":"integer"},"medium":{"format":"int64","type":"integer"},"none":{"format":"int64","type":"integer"},"unknown":{"format":"int64","type":"integer"}},"type":"object"},"SbomParams":{"id":"SbomParams","properties":{"license-filter":{"items":{"type":"string"},"type":"array"},"search":{"type":"string"},"sort-by":{"$ref":"SbomSortBy"},"sort-ord":{"$ref":"SortOrd"},"type-filter":{"items":{"$ref":"ComponentType"},"type":"array"}},"type":"object"},"SbomSortBy":{"enum":["name"],"id":"SbomSortBy","type":"string"},"Scan":{"description":"Represents a scan that aggregates multiple analyses executed on a particular image.","id":"Scan","properties":{"analysis":{"description":"All analyses processed as part of this scan.","items":{"$ref":"AnalysisState"},"type":"array"},"created":{"description":"The date and time when the scan was initiated.","format":"date-time","type":"string"},"id":{"$ref":"ScanId"},"image":{"$ref":"Image"},"image_type":{"$ref":"ImageType"},"info":{"$ref":"Info"},"score":{"$ref":"ScanScore"}},"type":"object"},"ScanId":{"description":"A wrapper struct `ScanId` around a UUID.\n ID in the scans table.","format":"uuid","id":"ScanId","type":"string"},"ScanOverview":{"description":"Response object for `/scans/:id/overview` endpoint.\n\nSee [module's](super) documentation for more information\nabout schema and computation 
logic.","id":"ScanOverview","properties":{"capabilities":{"$ref":"CapabilitiesOverview"},"crypto":{"$ref":"CryptoOverview"},"cve":{"$ref":"CveOverview"},"hardening":{"$ref":"HardeningOverview"},"info":{"$ref":"InfoOverview"},"kernel":{"$ref":"KernelOverview"},"malware":{"$ref":"MalwareOverview"},"password-hash":{"$ref":"PasswordHashOverview"},"software-bom":{"$ref":"SoftwareBOMOverview"},"stack-overflow":{"$ref":"StackOverflowOverview"},"symbols":{"$ref":"SymbolsOverview"},"tasks":{"$ref":"TasksOverview"}},"type":"object"},"ScanScore":{"description":"The calculated score with a weighted algorithm over all analyses.","id":"ScanScore","properties":{"score":{"$ref":"Score"},"scores":{"description":"Individual analyses scores.","items":{"$ref":"AnalysisScore"},"type":"array"}},"type":"object"},"ScanStatus":{"description":"The status of a [`Scan`](analyzer_db::repository::scan::Scan)\nand all the [`Analysis`](analyzer_db::repository::analysis::Analysis).","id":"ScanStatus","properties":{"id":{"$ref":"ScanId"},"status":{"$ref":"AnalysisStatus"}},"type":"object"},"ScanType":{"description":"Represents a unified type for analyses across all supported images.","id":"ScanType"},"Score":{"description":"Represents a security impact score, ranging from 0 to 100.\n\nA higher value indicates a greater security impact.","format":"int32","id":"Score","type":"integer"},"Section":{"description":"Represents a group of requirements, grouped by [SubSection]s.","id":"Section","properties":{"label":{"description":"Name of the requirement","type":"string"},"policy-ref":{"description":"Reference to the policy associated with the requirement","type":"string"},"sub-sections":{"description":"List of sub-requirements or checks associated with this requirement","items":{"$ref":"SubSection"},"type":"array"}},"type":"object"},"SeverityFilter":{"enum":["none","low","medium","high","critical","unknown"],"id":"SeverityFilter","type":"string"},"SoftwareBOMOverview":{"description":"Overview for Software 
BOM analysis.","id":"SoftwareBOMOverview","properties":{"count":{"description":"Total number of software BOM entries.","format":"int64","type":"integer"},"licenses":{"description":"License type and their number of occurrences.","type":"object"}},"type":"object"},"SortOrd":{"enum":["asc","desc"],"id":"SortOrd","type":"string"},"StackOverflowOverview":{"description":"Overview for Stack Overflow analysis.","id":"StackOverflowOverview","properties":{"method":{"description":"Name of the protection method used,\nor `None` if stack overflow protection is not enabled.","type":"string"}},"type":"object"},"Status":{"description":"Status of the AI suggestions computation.","enum":["in-progress","finished"],"id":"Status","type":"string"},"StatusFilter":{"enum":["enabled","disabled"],"id":"StatusFilter","type":"string"},"SubSection":{"description":"Represents a group of requirements","id":"SubSection","properties":{"label":{"description":"Name of the requirement","type":"string"},"requirements":{"description":"List of sub-requirements or checks associated with this requirement","items":{"$ref":"Requirement"},"type":"array"}},"type":"object"},"SuggestionResponse":{"description":"User response to AI suggestion.","enum":["accepted","rejected"],"id":"SuggestionResponse","type":"string"},"SymbolsOverview":{"description":"Overview for Symbol analysis.","id":"SymbolsOverview","properties":{"count":{"description":"Number of analyzed symbols.","format":"int64","type":"integer"}},"type":"object"},"TasksOverview":{"description":"Overview for Task analysis.","id":"TasksOverview","properties":{"count":{"description":"Number of analysed tasks.","format":"int64","type":"integer"}},"type":"object"},"TypeFilter":{"enum":["sect","func","obj","file","notype"],"id":"TypeFilter","type":"string"},"UpdateObject":{"description":"The request to update fields on an [`Object`].","id":"UpdateObject","properties":{"description":{"description":"Description of the 
object.","type":"string"},"favorite":{"description":"Sets if the object is a favorite or not.","type":"boolean"},"name":{"description":"Name of the object.","type":"string"},"tags":{"description":"The tags associated with the object.","items":{"type":"string"},"type":"array"}},"type":"object"},"UserId":{"description":"A wrapper struct `UserId` around a UUID.\n ID in the users table.","format":"uuid","id":"UserId","type":"string"},"UserResult":{"id":"UserResult","properties":{"status":{"$ref":"UserStatus"}},"type":"object"},"UserStatus":{"description":"Represents the status of a requirement overwritten by the user","enum":["passed","failed"],"id":"UserStatus","type":"string"},"UserUploadedDocument":{"description":"Description of the user provided file\nused by ai to give its suggestion.","id":"UserUploadedDocument","properties":{"filename":{"description":"Name of the user uploaded file.","type":"string"}},"type":"object"}},"resources":{"api":{"resources":{"health":{"methods":{"list":{"id":"analyzer-api-routes.api.health.list","httpMethod":"GET","path":"api/health","description":"Returns if the service is in a healthy state.","response":{"$ref":"HealthStatus"}}}},"objects":{"methods":{"create":{"id":"analyzer-api-routes.api.objects.create","httpMethod":"POST","path":"api/objects","description":"Create new object","request":{"$ref":"CreateObject"}},"delete":{"id":"analyzer-api-routes.api.objects.delete","httpMethod":"DELETE","path":"api/objects/{id}","description":"Deletes an object and all related scans.","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Unique identifier of the object to delete"}},"parameterOrder":["id"]},"get":{"id":"analyzer-api-routes.api.objects.get","httpMethod":"GET","path":"api/objects/{id}","description":"Retrieve an object by its ID.","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Object 
ID"}},"parameterOrder":["id"]},"list":{"id":"analyzer-api-routes.api.objects.list","httpMethod":"GET","path":"api/objects","description":"Retrieve a list of all objects of the current user.","parameters":{"end_timestamp":{"type":"string","required":false,"location":"query","description":"End timestamp for pagination.","format":"datetime"},"id":{"type":"string","required":false,"location":"query","description":"Pagination cursor (UUID).","format":"uuid"},"limit":{"type":"integer","required":false,"location":"query","description":"Maximum number of items per page.","format":"int32"},"start_timestamp":{"type":"string","required":false,"location":"query","description":"Start timestamp for pagination.","format":"datetime"}}},"update":{"id":"analyzer-api-routes.api.objects.update","httpMethod":"PUT","path":"api/objects/{id}","description":"Update an object","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Object ID"}},"parameterOrder":["id"],"request":{"$ref":"UpdateObject"}}},"resources":{"scans":{"methods":{"list":{"id":"analyzer-api-routes.api.objects.scans.list","httpMethod":"GET","path":"api/objects/{id}/scans","description":"Those scans could be","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Object ID"}},"parameterOrder":["id"]}}}}},"scans":{"methods":{"create":{"id":"analyzer-api-routes.api.scans.create","httpMethod":"POST","path":"api/scans","description":"Schedule a new scan.","response":{"$ref":"NewScanResponse"}},"delete":{"id":"analyzer-api-routes.api.scans.delete","httpMethod":"DELETE","path":"api/scans/{id}","description":"Delete a scan.","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Scan ID"}},"parameterOrder":["id"]},"get":{"id":"analyzer-api-routes.api.scans.get","httpMethod":"GET","path":"api/scans/{id}","description":"Returns a scan.","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Scan 
ID"}},"parameterOrder":["id"],"response":{"$ref":"Scan"}},"list":{"id":"analyzer-api-routes.api.scans.list","httpMethod":"GET","path":"api/scans","description":"Retrieve a list of scans."}},"resources":{"cancel":{"methods":{"create":{"id":"analyzer-api-routes.api.scans.cancel.create","httpMethod":"POST","path":"api/scans/{id}/cancel","description":"This can be used to cancel an already pending or running scan.","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Scan ID"}},"parameterOrder":["id"]}}},"compliance-check":{"resources":{"cyber-resilience-act":{"methods":{"list":{"id":"analyzer-api-routes.api.scans.compliance-check.cyber-resilience-act.list","httpMethod":"GET","path":"api/scans/{id}/compliance-check/cyber-resilience-act","description":"Computes compliance with Cyber Resilience Act","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Scan ID","format":"uuid"}},"parameterOrder":["id"],"response":{"$ref":"CyberResilienceActReport"}}},"resources":{"ai-suggestion":{"resources":{"begin":{"methods":{"create":{"id":"analyzer-api-routes.api.scans.compliance-check.cyber-resilience-act.ai-suggestion.begin.create","httpMethod":"POST","path":"api/scans/{id}/compliance-check/cyber-resilience-act/ai-suggestion/begin","description":"Triggers CRA AI suggestion using user-provided documents.","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Scan ID","format":"uuid"}},"parameterOrder":["id"]}}},"status":{"methods":{"list":{"id":"analyzer-api-routes.api.scans.compliance-check.cyber-resilience-act.ai-suggestion.status.list","httpMethod":"GET","path":"api/scans/{id}/compliance-check/cyber-resilience-act/ai-suggestion/status","description":"Returns status of the CRA AI suggestion.","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Scan 
ID","format":"uuid"}},"parameterOrder":["id"],"response":{"$ref":"AiSuggestionStatus"}}}}}},"overwrite":{"methods":{"overwrite_compliance_check_requirement":{"id":"analyzer-api-routes.api.scans.compliance-check.cyber-resilience-act.overwrite.overwrite_compliance_check_requirement","httpMethod":"PUT","path":"api/scans/{id}/compliance-check/cyber-resilience-act/overwrite","description":"Overwrites compliance check requirement","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Scan ID","format":"uuid"}},"parameterOrder":["id"],"request":{"$ref":"RequirementOverwrite"}}}},"report":{"methods":{"list":{"id":"analyzer-api-routes.api.scans.compliance-check.cyber-resilience-act.report.list","httpMethod":"GET","path":"api/scans/{id}/compliance-check/cyber-resilience-act/report","description":"Downloads Cyber Resilience Act compliance report as PDF","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Scan ID","format":"uuid"}},"parameterOrder":["id"]}}}}}}},"documents":{"methods":{"create":{"id":"analyzer-api-routes.api.scans.documents.create","httpMethod":"POST","path":"api/scans/{id}/documents","description":"Upload a document for a scan.","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Scan ID"}},"parameterOrder":["id"],"response":{"$ref":"DocumentUploadResponse"}},"delete":{"id":"analyzer-api-routes.api.scans.documents.delete","httpMethod":"DELETE","path":"api/scans/{id}/documents/{file_name}","description":"Delete a single document for a scan.","parameters":{"file_name":{"type":"string","required":true,"location":"path","description":"Document file name"},"id":{"type":"string","required":true,"location":"path","description":"Scan ID"}},"parameterOrder":["id","file_name"]},"delete_documents":{"id":"analyzer-api-routes.api.scans.documents.delete_documents","httpMethod":"DELETE","path":"api/scans/{id}/documents","description":"Delete all documents for a 
scan.","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Scan ID"}},"parameterOrder":["id"]},"list":{"id":"analyzer-api-routes.api.scans.documents.list","httpMethod":"GET","path":"api/scans/{id}/documents","description":"List documents for a scan.","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Scan ID"}},"parameterOrder":["id"],"response":{"$ref":"DocumentListResponse"}}}},"overview":{"methods":{"get":{"id":"analyzer-api-routes.api.scans.overview.get","httpMethod":"GET","path":"api/scans/{scan_id}/overview/{analysis_id}","description":"Returns an overview of one analysis.","parameters":{"analysis_id":{"type":"string","required":true,"location":"path","description":"Analysis ID"},"scan_id":{"type":"string","required":true,"location":"path","description":"Scan ID"}},"parameterOrder":["scan_id","analysis_id"],"response":{"$ref":"AnalysisOverview"}},"list":{"id":"analyzer-api-routes.api.scans.overview.list","httpMethod":"GET","path":"api/scans/{id}/overview","description":"Returns an aggregated overview of all analysis executed for one scan.","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Scan ID"}},"parameterOrder":["id"],"response":{"$ref":"ScanOverview"}}}},"report":{"methods":{"list":{"id":"analyzer-api-routes.api.scans.report.list","httpMethod":"GET","path":"api/scans/{id}/report","description":"Downloads a PDF security report for a scan.","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Scan ID","format":"uuid"}},"parameterOrder":["id"]}}},"results":{"methods":{"get":{"id":"analyzer-api-routes.api.scans.results.get","httpMethod":"GET","path":"api/scans/{scan_id}/results/{analysis_id}","description":"Retrieve the results of one specific analysis of a scan.","parameters":{"analysis_id":{"type":"string","required":true,"location":"path","description":"Analysis 
ID"},"page":{"type":"integer","required":false,"location":"query","description":"Page number (must be > 0). If provided, `per-page` must also be provided.","format":"int32"},"per-page":{"type":"integer","required":false,"location":"query","description":"Items per page (must be > 0). If provided, `page` must also be provided.","format":"int32"},"query":{"type":"string","required":true,"location":"query","description":"Query parameters depend on the analysis type. Supported shapes: IDF task, other analysis types."},"scan_id":{"type":"string","required":true,"location":"path","description":"Scan ID"}},"parameterOrder":["scan_id","analysis_id"],"response":{"$ref":"AnalysisResultDTO"}}}},"sbom":{"methods":{"list":{"id":"analyzer-api-routes.api.scans.sbom.list","httpMethod":"GET","path":"api/scans/{id}/sbom","description":"Downloads the SBOM (CycloneDX JSON) for a scan.","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Scan ID","format":"uuid"}},"parameterOrder":["id"]}}},"score":{"methods":{"list":{"id":"analyzer-api-routes.api.scans.score.list","httpMethod":"GET","path":"api/scans/{id}/score","description":"Returns a security score of all successful finished analyses with their individual scores included.","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Scan ID"}},"parameterOrder":["id"],"response":{"$ref":"ScanScore"}}}},"status":{"methods":{"list":{"id":"analyzer-api-routes.api.scans.status.list","httpMethod":"GET","path":"api/scans/{id}/status","description":"Returns the status of a scan.","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Scan ID"}},"parameterOrder":["id"],"response":{"$ref":"ScanStatus"}}}},"types":{"methods":{"list":{"id":"analyzer-api-routes.api.scans.types.list","httpMethod":"GET","path":"api/scans/types","description":"Returns a list of all available analysis types for each different image."}}}}}}}}} \ No newline at end of file diff --git 
a/docs/skills.md b/docs/skills.md index 412226b..c825afd 100644 --- a/docs/skills.md +++ b/docs/skills.md @@ -4,8 +4,9 @@ | Skill | Description | |-------|-------------| -| [analyzer-shared](../skills/analyzer-shared/SKILL.md) | Shared patterns for authentication, global flags, and error handling. | -| [analyzer-health](../skills/analyzer-health/SKILL.md) | Check API service health. | -| [analyzer-objects](../skills/analyzer-objects/SKILL.md) | Manage firmware/software objects (CRUD operations). | -| [analyzer-scans](../skills/analyzer-scans/SKILL.md) | Manage scans, results, compliance checks, reports, and documents. | +| [analyzer-health](../skills/analyzer-health/SKILL.md) | API operations for analyzer-health. | +| [analyzer-objects](../skills/analyzer-objects/SKILL.md) | API operations for analyzer-objects. | +| [analyzer-scans](../skills/analyzer-scans/SKILL.md) | API operations for analyzer-scans. | +| [analyzer-shared](../skills/analyzer-shared/SKILL.md) | API operations for analyzer-shared. | +| [shared](../skills/shared/SKILL.md) | Shared patterns for authentication, global flags, and error handling. | diff --git a/skills/analyzer-health/SKILL.md b/skills/analyzer-health/SKILL.md index 87481b7..550a3e1 100644 --- a/skills/analyzer-health/SKILL.md +++ b/skills/analyzer-health/SKILL.md @@ -1,21 +1,21 @@ --- name: analyzer-health version: 0.5.0 -description: "Check API service health." +description: "Manage health via the analyzer API — Firmware and software image security analysis" metadata: openclaw: category: "security" requires: bins: ["analyzer"] - cliHelp: "analyzer api health --help" + cliHelp: "analyzer api analyzer health --help" --- -# analyzer health +# analyzer-health -> **PREREQUISITE:** Read `../analyzer-shared/SKILL.md` for auth, global flags, and error handling. +> **PREREQUISITE:** Read `../shared/SKILL.md` for auth, global flags, and error handling. 
```bash -analyzer api health [flags] +analyzer api analyzer health [flags] ``` ## API Methods @@ -36,10 +36,10 @@ Before calling any API method, inspect it: ```bash # Browse resources and methods -analyzer api health --help +analyzer api analyzer health --help # Inspect a method's required params, types, and defaults -analyzer schema api.health. +analyzer schema analyzer.health. ``` Use `analyzer schema` output to build your `--params` and `--json` flags. @@ -47,12 +47,12 @@ Use `analyzer schema` output to build your `--params` and `--json` flags. ## Examples ```bash -# Check service health -analyzer api health list +# Returns if the service is in an healthy state. +analyzer api analyzer health list ``` ## See Also -- [analyzer-shared](../analyzer-shared/SKILL.md) — Global flags and auth +- [shared](../shared/SKILL.md) — Global flags and auth - [CONTEXT.md](../../CONTEXT.md) — Full agent reference diff --git a/skills/analyzer-objects/SKILL.md b/skills/analyzer-objects/SKILL.md index dd74682..a206668 100644 --- a/skills/analyzer-objects/SKILL.md +++ b/skills/analyzer-objects/SKILL.md @@ -1,21 +1,21 @@ --- name: analyzer-objects version: 0.5.0 -description: "Manage firmware/software objects (CRUD operations)." +description: "Manage objects via the analyzer API — Firmware and software image security analysis" metadata: openclaw: category: "security" requires: bins: ["analyzer"] - cliHelp: "analyzer api objects --help" + cliHelp: "analyzer api analyzer objects --help" --- -# analyzer objects +# analyzer-objects -> **PREREQUISITE:** Read `../analyzer-shared/SKILL.md` for auth, global flags, and error handling. +> **PREREQUISITE:** Read `../shared/SKILL.md` for auth, global flags, and error handling. 
```bash -analyzer api objects [flags] +analyzer api analyzer objects [flags] ``` ## API Methods @@ -44,10 +44,10 @@ Before calling any API method, inspect it: ```bash # Browse resources and methods -analyzer api objects --help +analyzer api analyzer objects --help # Inspect a method's required params, types, and defaults -analyzer schema api.objects. +analyzer schema analyzer.objects. ``` Use `analyzer schema` output to build your `--params` and `--json` flags. @@ -55,21 +55,24 @@ Use `analyzer schema` output to build your `--params` and `--json` flags. ## Examples ```bash -# List all objects -analyzer api objects list +# Create new object +analyzer api analyzer objects create --json '{...}' --dry-run -# Get a specific object -analyzer api objects get --params '{"id": "OBJ_ID"}' +# Deletes a object and all related scans. +analyzer api analyzer objects delete --params '{"id": "..."}' --dry-run -# Create an object (dry-run first) -analyzer api objects create --json '{"name": "Router FW", "description": "Edge router firmware"}' --dry-run +# Retrieve an object by its ID. +analyzer api analyzer objects get --params '{"id": "..."}' -# Delete an object -analyzer api objects delete --params '{"id": "OBJ_ID"}' --dry-run +# Retrieve a list of all objects of the current user. +analyzer api analyzer objects list + +# Update an object +analyzer api analyzer objects update --params '{"id": "..."}' --json '{...}' --dry-run ``` ## See Also -- [analyzer-shared](../analyzer-shared/SKILL.md) — Global flags and auth +- [shared](../shared/SKILL.md) — Global flags and auth - [CONTEXT.md](../../CONTEXT.md) — Full agent reference diff --git a/skills/analyzer-scans/SKILL.md b/skills/analyzer-scans/SKILL.md index b1438a4..0bb5008 100644 --- a/skills/analyzer-scans/SKILL.md +++ b/skills/analyzer-scans/SKILL.md @@ -1,21 +1,21 @@ --- name: analyzer-scans version: 0.5.0 -description: "Manage scans, results, compliance checks, reports, and documents." 
+description: "Manage scans via the analyzer API — Firmware and software image security analysis" metadata: openclaw: category: "security" requires: bins: ["analyzer"] - cliHelp: "analyzer api scans --help" + cliHelp: "analyzer api analyzer scans --help" --- -# analyzer scans +# analyzer-scans -> **PREREQUISITE:** Read `../analyzer-shared/SKILL.md` for auth, global flags, and error handling. +> **PREREQUISITE:** Read `../shared/SKILL.md` for auth, global flags, and error handling. ```bash -analyzer api scans [flags] +analyzer api analyzer scans [flags] ``` ## API Methods @@ -67,7 +67,7 @@ analyzer api scans [flags] ### report - - `list` (GET) — + - `list` (GET) — Downloads a PDF security report for a scan. ### results @@ -75,7 +75,7 @@ analyzer api scans [flags] ### sbom - - `list` (GET) — + - `list` (GET) — Downloads the SBOM (CycloneDX JSON) for a scan. ### score @@ -103,10 +103,10 @@ Before calling any API method, inspect it: ```bash # Browse resources and methods -analyzer api scans --help +analyzer api analyzer scans --help # Inspect a method's required params, types, and defaults -analyzer schema api.scans. +analyzer schema analyzer.scans. ``` Use `analyzer schema` output to build your `--params` and `--json` flags. @@ -114,30 +114,21 @@ Use `analyzer schema` output to build your `--params` and `--json` flags. ## Examples ```bash -# List scans -analyzer api scans list +# Schedule a new scan. +analyzer api analyzer scans create --dry-run -# Schedule a scan (dry-run first) -analyzer api scans create --json '{...}' --dry-run +# Delete a scan. +analyzer api analyzer scans delete --params '{"id": "..."}' --dry-run -# Poll scan status -analyzer api scans status list --params '{"id": "SCAN_ID"}' +# Returns a scan. 
+analyzer api analyzer scans get --params '{"id": "..."}' -# Get security score -analyzer api scans score list --params '{"id": "SCAN_ID"}' - -# Get scan overview -analyzer api scans overview list --params '{"id": "SCAN_ID"}' - -# Get CVE results -analyzer api scans results get --params '{"scan_id": "SCAN_ID", "analysis_id": "cve"}' - -# Check CRA compliance -analyzer api scans compliance-check cyber-resilience-act list --params '{"id": "SCAN_ID"}' +# Retrieve a list of scans. +analyzer api analyzer scans list ``` ## See Also -- [analyzer-shared](../analyzer-shared/SKILL.md) — Global flags and auth +- [shared](../shared/SKILL.md) — Global flags and auth - [CONTEXT.md](../../CONTEXT.md) — Full agent reference diff --git a/skills/shared/SKILL.md b/skills/shared/SKILL.md new file mode 100644 index 0000000..4b52578 --- /dev/null +++ b/skills/shared/SKILL.md @@ -0,0 +1,84 @@ +--- +name: shared +description: "Analyzer CLI: Shared patterns for authentication, global flags, and error handling." +metadata: + openclaw: + category: "security" + requires: + bins: ["analyzer"] +--- + +# analyzer — Shared Reference + +## Registered Services + +| Alias | API | Description | +|-------|-----|-------------| +| `analyzer` | analyzer-api-routes | Firmware and software image security analysis | + +## Authentication + +```bash +# Interactive login (prompts for API key, validates, saves) +analyzer login + +# Environment variable +export ANALYZER_API_KEY="your-api-key" +``` + +## Global Flags + +| Flag | Description | +|------|-------------| +| `--params ''` | Path and query parameters | +| `--json ''` | Request body for POST/PUT/PATCH | +| `--dry-run` | Validate and print request without executing | +| `--discovery ` | Override discovery document (dev/testing) | +| `--format ` | Output format: `human` (default), `json` | + +## CLI Syntax + +```bash +# API commands (service name is first positional arg) +analyzer api [sub-resource] [flags] + +# Schema introspection (service name is first 
dotted segment) +analyzer schema .. + +# Generate skills for all services +analyzer generate-skills +``` + +## Schema Introspection + +Before calling any API method, inspect it: + +```bash +# Browse all resources for a service +analyzer schema analyzer.api + +# Inspect a specific method +analyzer schema analyzer.scans.create + +# Browse a resource's methods +analyzer schema analyzer.scans.compliance-check +``` + +Use `analyzer schema` output to build your `--params` and `--json` flags. + +## Security Rules + +- **Always** use `--dry-run` for mutating operations (create, update, delete) before actual execution +- **Always** confirm with user before executing write/delete commands +- Prefer `--fields` to limit response size and protect the context window +- Poll scan status — do not guess when a scan completes + +## Error Handling + +All errors are JSON on stderr with a non-zero exit code: + +```json +{"error": {"code": 404, "message": "Object not found"}} +``` + +Check the exit code: `0` = success, non-zero = failure. Parse the error JSON to decide next steps. Do not retry without understanding the error. diff --git a/src/api/generate_skills.rs b/src/api/generate_skills.rs index 77cc9e3..856a600 100644 --- a/src/api/generate_skills.rs +++ b/src/api/generate_skills.rs @@ -1,8 +1,9 @@ //! Skill file generator. //! -//! `analyzer generate-skills` reads the discovery document and writes -//! markdown skill files to `skills/` — one per top-level API resource, -//! plus a shared skill for global flags, auth, and error handling. +//! `analyzer generate-skills` reads discovery documents for all registered +//! services and writes markdown skill files to `skills/` — one per top-level +//! API resource per service, plus a shared skill for global flags, auth, and +//! error handling. //! //! Follows the same pattern as the gws CLI's `generate-skills` command: //! 
YAML frontmatter with `metadata.openclaw`, prerequisite notes, @@ -14,19 +15,23 @@ use std::path::Path; use anyhow::{Context, Result}; use crate::discovery::{DiscoveryDocument, DiscoveryMethod, DiscoveryResource}; +use crate::services::{ServiceEntry, SERVICES}; struct SkillIndexEntry { name: String, description: String, } -/// Generate skill files from the discovery document. +/// Generate skill files for a single registered service. /// -/// Writes: -/// - `skills/analyzer-/SKILL.md` for each top-level resource under `api` -/// - `skills/analyzer-shared/SKILL.md` for global flags, auth, and error handling -/// - `docs/skills.md` skills index -pub fn generate(doc: &DiscoveryDocument, output_dir: &Path) -> Result<()> { +/// Writes `skills/{alias}-/SKILL.md` for each top-level resource +/// under `api` in the discovery document. +pub fn generate_for_service( + doc: &DiscoveryDocument, + entry: &ServiceEntry, + output_dir: &Path, +) -> Result<()> { + let alias = entry.aliases[0]; let api_resource = doc .resources .get("api") @@ -35,57 +40,66 @@ pub fn generate(doc: &DiscoveryDocument, output_dir: &Path) -> Result<()> { std::fs::create_dir_all(output_dir) .with_context(|| format!("failed to create output directory: {}", output_dir.display()))?; - let mut index: Vec = Vec::new(); - - // Generate the shared skill first (others reference it) - let shared_dir = output_dir.join("analyzer-shared"); - std::fs::create_dir_all(&shared_dir)?; - let shared_content = render_shared_skill(&doc.version); - let shared_path = shared_dir.join("SKILL.md"); - std::fs::write(&shared_path, &shared_content) - .with_context(|| format!("failed to write {}", shared_path.display()))?; - println!(" wrote {}", shared_path.display()); - index.push(SkillIndexEntry { - name: "analyzer-shared".to_string(), - description: "Shared patterns for authentication, global flags, and error handling." 
- .to_string(), - }); - - // Generate one skill per top-level resource for (resource_name, resource) in &api_resource.resources { - let skill_name = format!("analyzer-{resource_name}"); + let skill_name = format!("{alias}-{resource_name}"); let skill_dir = output_dir.join(&skill_name); std::fs::create_dir_all(&skill_dir)?; - let content = render_resource_skill(resource_name, resource, &doc.version); + let content = render_resource_skill(alias, resource_name, resource, entry, &doc.version); let skill_path = skill_dir.join("SKILL.md"); std::fs::write(&skill_path, &content) .with_context(|| format!("failed to write {}", skill_path.display()))?; - println!(" wrote {}", skill_path.display()); - - let description = match resource_name.as_str() { - "objects" => "Manage firmware/software objects (CRUD operations).".to_string(), - "scans" => { - "Manage scans, results, compliance checks, reports, and documents.".to_string() - } - "health" => "Check API service health.".to_string(), - _ => format!("API operations for {resource_name}."), - }; - index.push(SkillIndexEntry { - name: skill_name, - description, - }); + println!(" wrote {}", skill_path.display()); } - // Write skills index - write_skills_index(&index)?; + Ok(()) +} + +/// Generate the shared skill file (service-agnostic: auth, flags, patterns). +/// +/// Lists all registered services, covers authentication, global flags, +/// schema introspection, and error handling. 
+pub fn generate_shared(output_dir: &Path) -> Result<()> { + std::fs::create_dir_all(output_dir) + .with_context(|| format!("failed to create output directory: {}", output_dir.display()))?; + let shared_dir = output_dir.join("shared"); + std::fs::create_dir_all(&shared_dir)?; + let shared_content = render_shared_skill(); + let shared_path = shared_dir.join("SKILL.md"); + std::fs::write(&shared_path, &shared_content) + .with_context(|| format!("failed to write {}", shared_path.display()))?; + println!(" wrote {}", shared_path.display()); Ok(()) } -/// Write `docs/skills.md` index file. -fn write_skills_index(entries: &[SkillIndexEntry]) -> Result<()> { +/// Write `docs/skills.md` index file listing all generated skills. +pub fn write_skills_index(output_dir: &Path) -> Result<()> { + let mut entries: Vec = Vec::new(); + + // Collect all skill directories that contain a SKILL.md + if output_dir.exists() { + let mut dirs: Vec<_> = std::fs::read_dir(output_dir)? + .filter_map(|e| e.ok()) + .filter(|e| e.file_type().map(|t| t.is_dir()).unwrap_or(false)) + .collect(); + dirs.sort_by_key(|e| e.file_name()); + + for entry in dirs { + let name = entry.file_name().to_string_lossy().to_string(); + let skill_file = entry.path().join("SKILL.md"); + if skill_file.exists() { + let description = if name == "shared" { + "Shared patterns for authentication, global flags, and error handling.".to_string() + } else { + format!("API operations for {name}.") + }; + entries.push(SkillIndexEntry { name, description }); + } + } + } + let mut out = String::new(); writeln!(out, "# Skills Index").unwrap(); writeln!(out).unwrap(); @@ -97,7 +111,7 @@ fn write_skills_index(entries: &[SkillIndexEntry]) -> Result<()> { writeln!(out).unwrap(); writeln!(out, "| Skill | Description |").unwrap(); writeln!(out, "|-------|-------------|").unwrap(); - for entry in entries { + for entry in &entries { writeln!( out, "| [{}](../skills/{}/SKILL.md) | {} |", @@ -112,50 +126,68 @@ fn write_skills_index(entries: 
&[SkillIndexEntry]) -> Result<()> { let path = docs_dir.join("skills.md"); std::fs::write(&path, &out) .with_context(|| format!("failed to write {}", path.display()))?; - println!(" wrote {}", path.display()); + println!(" wrote {}", path.display()); Ok(()) } +// --------------------------------------------------------------------------- +// Rendering +// --------------------------------------------------------------------------- + /// Render a SKILL.md for a single top-level resource (e.g. "objects", "scans"). -fn render_resource_skill(name: &str, resource: &DiscoveryResource, version: &str) -> String { +fn render_resource_skill( + service_alias: &str, + name: &str, + resource: &DiscoveryResource, + entry: &ServiceEntry, + version: &str, +) -> String { let mut out = String::new(); - let description = match name { - "objects" => "Manage firmware/software objects (CRUD operations).", - "scans" => "Manage scans, results, compliance checks, reports, and documents.", - "health" => "Check API service health.", - _ => "API resource operations.", - }; + let skill_name = format!("{service_alias}-{name}"); // YAML frontmatter (matches gws pattern with metadata.openclaw) writeln!(out, "---").unwrap(); - writeln!(out, "name: analyzer-{name}").unwrap(); + writeln!(out, "name: {skill_name}").unwrap(); writeln!(out, "version: {version}").unwrap(); - writeln!(out, "description: \"{description}\"").unwrap(); + writeln!( + out, + "description: \"Manage {name} via the {service_alias} API — {}\"", + entry.description + ) + .unwrap(); writeln!(out, "metadata:").unwrap(); writeln!(out, " openclaw:").unwrap(); writeln!(out, " category: \"security\"").unwrap(); writeln!(out, " requires:").unwrap(); writeln!(out, " bins: [\"analyzer\"]").unwrap(); - writeln!(out, " cliHelp: \"analyzer api {name} --help\"").unwrap(); + writeln!( + out, + " cliHelp: \"analyzer api {service_alias} {name} --help\"" + ) + .unwrap(); writeln!(out, "---").unwrap(); writeln!(out).unwrap(); // Title - 
writeln!(out, "# analyzer {name}").unwrap(); + writeln!(out, "# {skill_name}").unwrap(); writeln!(out).unwrap(); // Prerequisite note (gws pattern) writeln!( out, - "> **PREREQUISITE:** Read `../analyzer-shared/SKILL.md` for auth, global flags, and error handling." + "> **PREREQUISITE:** Read `../shared/SKILL.md` for auth, global flags, and error handling." ) .unwrap(); writeln!(out).unwrap(); // Syntax writeln!(out, "```bash").unwrap(); - writeln!(out, "analyzer api {name} [flags]").unwrap(); + writeln!( + out, + "analyzer api {service_alias} {name} [flags]" + ) + .unwrap(); writeln!(out, "```").unwrap(); writeln!(out).unwrap(); @@ -194,22 +226,26 @@ fn render_resource_skill(name: &str, resource: &DiscoveryResource, version: &str // Discovering Commands section (gws pattern) writeln!(out, "## Discovering Commands").unwrap(); writeln!(out).unwrap(); + writeln!(out, "Before calling any API method, inspect it:").unwrap(); + writeln!(out).unwrap(); + writeln!(out, "```bash").unwrap(); + writeln!(out, "# Browse resources and methods").unwrap(); writeln!( out, - "Before calling any API method, inspect it:" + "analyzer api {service_alias} {name} --help" ) .unwrap(); writeln!(out).unwrap(); - writeln!(out, "```bash").unwrap(); - writeln!(out, "# Browse resources and methods").unwrap(); - writeln!(out, "analyzer api {name} --help").unwrap(); - writeln!(out).unwrap(); writeln!( out, "# Inspect a method's required params, types, and defaults" ) .unwrap(); - writeln!(out, "analyzer schema api.{name}.").unwrap(); + writeln!( + out, + "analyzer schema {service_alias}.{name}." 
+ ) + .unwrap(); writeln!(out, "```").unwrap(); writeln!(out).unwrap(); writeln!( @@ -222,14 +258,14 @@ fn render_resource_skill(name: &str, resource: &DiscoveryResource, version: &str // Examples section writeln!(out, "## Examples").unwrap(); writeln!(out).unwrap(); - render_examples(&mut out, name, resource); + render_examples(&mut out, service_alias, name, resource); // See Also writeln!(out, "## See Also").unwrap(); writeln!(out).unwrap(); writeln!( out, - "- [analyzer-shared](../analyzer-shared/SKILL.md) — Global flags and auth" + "- [shared](../shared/SKILL.md) — Global flags and auth" ) .unwrap(); writeln!( @@ -242,7 +278,7 @@ fn render_resource_skill(name: &str, resource: &DiscoveryResource, version: &str out } -/// Render methods as a list with descriptions (gws pattern: `- method — description`). +/// Render methods as a list with descriptions. fn render_method_list( out: &mut String, _path_prefix: &str, @@ -281,182 +317,254 @@ fn render_sub_resources(out: &mut String, path_prefix: &str, resource: &Discover } } -/// Render concrete examples for common operations. -fn render_examples(out: &mut String, name: &str, resource: &DiscoveryResource) { - match name { - "objects" => { - writeln!(out, "```bash").unwrap(); - writeln!(out, "# List all objects").unwrap(); - writeln!(out, "analyzer api objects list").unwrap(); +/// Render examples dynamically from the resource's direct methods. 
+fn render_examples( + out: &mut String, + service_alias: &str, + name: &str, + resource: &DiscoveryResource, +) { + if resource.methods.is_empty() { + return; + } + + writeln!(out, "```bash").unwrap(); + let mut first = true; + for (method_name, method) in &resource.methods { + if !first { writeln!(out).unwrap(); - if resource.methods.contains_key("get") { - writeln!(out, "# Get a specific object").unwrap(); - writeln!( - out, - "analyzer api objects get --params '{{\"id\": \"OBJ_ID\"}}'" - ) - .unwrap(); - writeln!(out).unwrap(); - } - if resource.methods.contains_key("create") { - writeln!(out, "# Create an object (dry-run first)").unwrap(); - writeln!( - out, - "analyzer api objects create --json '{{\"name\": \"Router FW\", \"description\": \"Edge router firmware\"}}' --dry-run" - ).unwrap(); - writeln!(out).unwrap(); - } - if resource.methods.contains_key("delete") { - writeln!(out, "# Delete an object").unwrap(); - writeln!( - out, - "analyzer api objects delete --params '{{\"id\": \"OBJ_ID\"}}' --dry-run" - ) - .unwrap(); - } - writeln!(out, "```").unwrap(); } - "scans" => { - writeln!(out, "```bash").unwrap(); - writeln!(out, "# List scans").unwrap(); - writeln!(out, "analyzer api scans list").unwrap(); - writeln!(out).unwrap(); - writeln!(out, "# Schedule a scan (dry-run first)").unwrap(); - writeln!(out, "analyzer api scans create --json '{{...}}' --dry-run").unwrap(); - writeln!(out).unwrap(); - writeln!(out, "# Poll scan status").unwrap(); - writeln!( - out, - "analyzer api scans status list --params '{{\"id\": \"SCAN_ID\"}}'" - ) - .unwrap(); - writeln!(out).unwrap(); - writeln!(out, "# Get security score").unwrap(); - writeln!( - out, - "analyzer api scans score list --params '{{\"id\": \"SCAN_ID\"}}'" - ) - .unwrap(); - writeln!(out).unwrap(); - writeln!(out, "# Get scan overview").unwrap(); - writeln!( - out, - "analyzer api scans overview list --params '{{\"id\": \"SCAN_ID\"}}'" - ) - .unwrap(); - writeln!(out).unwrap(); - writeln!(out, "# Get CVE 
results").unwrap(); - writeln!( - out, - "analyzer api scans results get --params '{{\"scan_id\": \"SCAN_ID\", \"analysis_id\": \"cve\"}}'" - ).unwrap(); - writeln!(out).unwrap(); - writeln!(out, "# Check CRA compliance").unwrap(); - writeln!( - out, - "analyzer api scans compliance-check cyber-resilience-act list --params '{{\"id\": \"SCAN_ID\"}}'" - ).unwrap(); - writeln!(out, "```").unwrap(); + first = false; + + // Comment from description + let desc = method + .description + .as_deref() + .and_then(|d| d.lines().next()) + .filter(|l| !l.is_empty()) + .unwrap_or(method_name); + writeln!(out, "# {desc}").unwrap(); + + // Command base + write!(out, "analyzer api {service_alias} {name} {method_name}").unwrap(); + + // --params with actual path parameter names + let path_params: Vec<&str> = method + .parameters + .iter() + .filter(|(_, p)| p.location == "path") + .map(|(n, _)| n.as_str()) + .collect(); + if !path_params.is_empty() { + let pairs: Vec = path_params + .iter() + .map(|n| format!("\"{n}\": \"...\"")) + .collect(); + write!(out, " --params '{{{}}}'", pairs.join(", ")).unwrap(); } - "health" => { - writeln!(out, "```bash").unwrap(); - writeln!(out, "# Check service health").unwrap(); - writeln!(out, "analyzer api health list").unwrap(); - writeln!(out, "```").unwrap(); + + // --json if method has a request body + if method.request.is_some() { + write!(out, " --json '{{...}}'").unwrap(); } - _ => { - writeln!(out, "```bash").unwrap(); - if resource.methods.contains_key("list") { - writeln!(out, "analyzer api {name} list").unwrap(); - } - if resource.methods.contains_key("get") { - writeln!( - out, - "analyzer api {name} get --params '{{\"id\": \"...\"}}'", - ) - .unwrap(); - } - writeln!(out, "```").unwrap(); + + // --dry-run for mutating verbs + match method.http_method.as_str() { + "POST" | "PUT" | "PATCH" | "DELETE" => write!(out, " --dry-run").unwrap(), + _ => {} } + + writeln!(out).unwrap(); } + writeln!(out, "```").unwrap(); writeln!(out).unwrap(); } 
/// Render the shared SKILL.md covering global flags, auth, and error handling. -fn render_shared_skill(version: &str) -> String { - format!( - r#"--- -name: analyzer-shared -version: {version} -description: "Analyzer CLI: Shared patterns for authentication, global flags, and error handling." -metadata: - openclaw: - category: "security" - requires: - bins: ["analyzer"] ---- - -# analyzer — Shared Reference - -## Authentication - -```bash -# Interactive login (prompts for API key, validates, saves) -analyzer login - -# Environment variable -export ANALYZER_API_KEY="your-api-key" -``` - -## Global Flags - -| Flag | Description | -|------|-------------| -| `--params ''` | Path and query parameters | -| `--json ''` | Request body for POST/PUT/PATCH | -| `--dry-run` | Validate and print request without executing | -| `--discovery ` | Discovery document location (also: `ANALYZER_DISCOVERY_URL` env var) | -| `--format ` | Output format: `human` (default), `json` | - -## CLI Syntax - -```bash -analyzer api [sub-resource] [flags] -``` - -## Schema Introspection - -Before calling any API method, inspect it: - -```bash -# Browse all resources -analyzer schema api +/// +/// Lists all registered services with their descriptions. 
+fn render_shared_skill() -> String { + let mut out = String::new(); -# Inspect a specific method -analyzer schema api.scans.create + writeln!(out, "---").unwrap(); + writeln!(out, "name: shared").unwrap(); + writeln!( + out, + "description: \"Analyzer CLI: Shared patterns for authentication, global flags, and error handling.\"" + ).unwrap(); + writeln!(out, "metadata:").unwrap(); + writeln!(out, " openclaw:").unwrap(); + writeln!(out, " category: \"security\"").unwrap(); + writeln!(out, " requires:").unwrap(); + writeln!(out, " bins: [\"analyzer\"]").unwrap(); + writeln!(out, "---").unwrap(); + writeln!(out).unwrap(); -# Browse a resource's methods -analyzer schema api.scans.compliance-check -``` + writeln!(out, "# analyzer — Shared Reference").unwrap(); + writeln!(out).unwrap(); -Use `analyzer schema` output to build your `--params` and `--json` flags. + // Registered services + writeln!(out, "## Registered Services").unwrap(); + writeln!(out).unwrap(); + writeln!(out, "| Alias | API | Description |").unwrap(); + writeln!(out, "|-------|-----|-------------|").unwrap(); + for entry in SERVICES { + writeln!( + out, + "| `{}` | {} | {} |", + entry.aliases.join(", "), + entry.api_name, + entry.description + ) + .unwrap(); + } + writeln!(out).unwrap(); -## Security Rules + writeln!(out, "## Authentication").unwrap(); + writeln!(out).unwrap(); + writeln!(out, "```bash").unwrap(); + writeln!( + out, + "# Interactive login (prompts for API key, validates, saves)" + ) + .unwrap(); + writeln!(out, "analyzer login").unwrap(); + writeln!(out).unwrap(); + writeln!(out, "# Environment variable").unwrap(); + writeln!(out, "export ANALYZER_API_KEY=\"your-api-key\"").unwrap(); + writeln!(out, "```").unwrap(); + writeln!(out).unwrap(); -- **Always** use `--dry-run` for mutating operations (create, update, delete) before actual execution -- **Always** confirm with user before executing write/delete commands -- Prefer `--fields` to limit response size and protect the context window 
-- Poll scan status — do not guess when a scan completes + writeln!(out, "## Global Flags").unwrap(); + writeln!(out).unwrap(); + writeln!(out, "| Flag | Description |").unwrap(); + writeln!(out, "|------|-------------|").unwrap(); + writeln!( + out, + "| `--params ''` | Path and query parameters |" + ) + .unwrap(); + writeln!( + out, + "| `--json ''` | Request body for POST/PUT/PATCH |" + ) + .unwrap(); + writeln!( + out, + "| `--dry-run` | Validate and print request without executing |" + ) + .unwrap(); + writeln!( + out, + "| `--discovery ` | Override discovery document (dev/testing) |" + ) + .unwrap(); + writeln!( + out, + "| `--format ` | Output format: `human` (default), `json` |" + ) + .unwrap(); + writeln!(out).unwrap(); -## Error Handling + writeln!(out, "## CLI Syntax").unwrap(); + writeln!(out).unwrap(); + writeln!(out, "```bash").unwrap(); + writeln!( + out, + "# API commands (service name is first positional arg)" + ) + .unwrap(); + writeln!( + out, + "analyzer api [sub-resource] [flags]" + ) + .unwrap(); + writeln!(out).unwrap(); + writeln!( + out, + "# Schema introspection (service name is first dotted segment)" + ) + .unwrap(); + writeln!( + out, + "analyzer schema .." 
+ ) + .unwrap(); + writeln!(out).unwrap(); + writeln!(out, "# Generate skills for all services").unwrap(); + writeln!(out, "analyzer generate-skills").unwrap(); + writeln!(out, "```").unwrap(); + writeln!(out).unwrap(); -All errors are JSON on stderr with a non-zero exit code: + writeln!(out, "## Schema Introspection").unwrap(); + writeln!(out).unwrap(); + writeln!(out, "Before calling any API method, inspect it:").unwrap(); + writeln!(out).unwrap(); + writeln!(out, "```bash").unwrap(); + writeln!(out, "# Browse all resources for a service").unwrap(); + writeln!(out, "analyzer schema analyzer.api").unwrap(); + writeln!(out).unwrap(); + writeln!(out, "# Inspect a specific method").unwrap(); + writeln!(out, "analyzer schema analyzer.scans.create").unwrap(); + writeln!(out).unwrap(); + writeln!(out, "# Browse a resource's methods").unwrap(); + writeln!( + out, + "analyzer schema analyzer.scans.compliance-check" + ) + .unwrap(); + writeln!(out, "```").unwrap(); + writeln!(out).unwrap(); + writeln!( + out, + "Use `analyzer schema` output to build your `--params` and `--json` flags." + ) + .unwrap(); + writeln!(out).unwrap(); -```json -{{"error": {{"code": 404, "message": "Object not found"}}}} -``` + writeln!(out, "## Security Rules").unwrap(); + writeln!(out).unwrap(); + writeln!( + out, + "- **Always** use `--dry-run` for mutating operations (create, update, delete) before actual execution" + ).unwrap(); + writeln!( + out, + "- **Always** confirm with user before executing write/delete commands" + ) + .unwrap(); + writeln!( + out, + "- Prefer `--fields` to limit response size and protect the context window" + ) + .unwrap(); + writeln!( + out, + "- Poll scan status — do not guess when a scan completes" + ) + .unwrap(); + writeln!(out).unwrap(); -Check the exit code: `0` = success, non-zero = failure. Parse the error JSON to decide next steps. Do not retry without understanding the error. 
-"# + writeln!(out, "## Error Handling").unwrap(); + writeln!(out).unwrap(); + writeln!( + out, + "All errors are JSON on stderr with a non-zero exit code:" ) + .unwrap(); + writeln!(out).unwrap(); + writeln!(out, "```json").unwrap(); + writeln!( + out, + "{{\"error\": {{\"code\": 404, \"message\": \"Object not found\"}}}}" + ) + .unwrap(); + writeln!(out, "```").unwrap(); + writeln!(out).unwrap(); + writeln!( + out, + "Check the exit code: `0` = success, non-zero = failure. Parse the error JSON to decide next steps. Do not retry without understanding the error." + ).unwrap(); + + out } diff --git a/src/api/schema.rs b/src/api/schema.rs index 68f52bb..f473f00 100644 --- a/src/api/schema.rs +++ b/src/api/schema.rs @@ -9,6 +9,10 @@ use serde::Serialize; use crate::discovery::{self, DiscoveryDocument, DiscoveryResource}; /// Handle `analyzer schema `. +/// +/// The path is resolved from the root of the discovery document's resource tree. +/// Service-level routing (extracting the service name from the user's input) +/// is handled by main.rs, which prepends `"api."` before calling this function. pub fn handle_schema_command(doc: &DiscoveryDocument, dotted_path: &str) -> Result<()> { let segments: Vec<&str> = dotted_path.split('.').collect(); @@ -16,15 +20,12 @@ pub fn handle_schema_command(doc: &DiscoveryDocument, dotted_path: &str) -> Resu bail!("path cannot be empty"); } - // Path must start with "api" — we resolve from resources["api"] - if segments[0] != "api" { - bail!("path must start with 'api' (e.g. 
api.scans.score.list)"); - } - + // First segment selects the top-level resource (typically "api") + let first = segments[0]; let api_resource = doc .resources - .get("api") - .context("discovery document has no 'api' resource")?; + .get(first) + .with_context(|| format!("resource '{first}' not found in discovery document"))?; let rest = &segments[1..]; diff --git a/src/discovery.rs b/src/discovery.rs index bd71620..9b43f2a 100644 --- a/src/discovery.rs +++ b/src/discovery.rs @@ -5,10 +5,13 @@ use std::collections::BTreeMap; use std::path::PathBuf; +use std::time::Duration; use anyhow::{Context, Result, bail}; use serde::{Deserialize, Serialize}; +use crate::services::ServiceEntry; + // --------------------------------------------------------------------------- // Serde models // --------------------------------------------------------------------------- @@ -123,6 +126,45 @@ pub async fn load(source: &DiscoverySource) -> Result { serde_json::from_str(&json_str).context("failed to parse discovery document") } +/// Load discovery document for a registered service with 24h file cache. 
+/// +/// Cache location: `~/.cache/analyzer/{api_name}_{version}.json` +pub async fn load_for_service(entry: &ServiceEntry) -> Result { + let cache_dir = dirs::cache_dir() + .unwrap_or_else(|| PathBuf::from(".cache")) + .join("analyzer"); + std::fs::create_dir_all(&cache_dir) + .with_context(|| format!("failed to create cache dir {}", cache_dir.display()))?; + let cache_file = cache_dir.join(format!("{}_{}.json", entry.api_name, entry.version)); + + // Check cache (24h TTL) + if let Ok(metadata) = std::fs::metadata(&cache_file) { + if let Ok(modified) = metadata.modified() { + if modified.elapsed().unwrap_or_default() < Duration::from_secs(86400) { + let json = std::fs::read_to_string(&cache_file) + .with_context(|| format!("failed to read cache {}", cache_file.display()))?; + return serde_json::from_str(&json) + .context("failed to parse cached discovery document"); + } + } + } + + // Fetch from URL + let json = reqwest::get(entry.discovery_url) + .await + .with_context(|| format!("failed to fetch {}", entry.discovery_url))? 
+ .text() + .await + .context("failed to read response body")?; + + // Write cache (best-effort) + if let Err(e) = std::fs::write(&cache_file, &json) { + eprintln!("warning: failed to write cache {}: {e}", cache_file.display()); + } + + serde_json::from_str(&json).context("failed to parse discovery document") +} + // --------------------------------------------------------------------------- // Lookup helpers // --------------------------------------------------------------------------- diff --git a/src/main.rs b/src/main.rs index 0a2b727..1e5b474 100644 --- a/src/main.rs +++ b/src/main.rs @@ -9,12 +9,13 @@ mod commands; mod config; mod discovery; mod output; +mod services; use std::path::PathBuf; use std::process::ExitCode; use std::time::Duration; -use anyhow::Result; +use anyhow::{Context, Result, bail}; use clap::{Parser, Subcommand}; use uuid::Uuid; @@ -96,25 +97,28 @@ enum Command { /// Discovery-driven API access — dynamically generated from a discovery document. /// - /// Requires --discovery or ANALYZER_DISCOVERY_URL to be set. + /// The first positional arg is the service name (e.g. "analyzer"). + /// Uses the service registry by default; --discovery overrides. Api { - /// Arguments passed to the dynamic command tree. + /// Service name followed by resource path and method + /// (e.g., analyzer scans list). #[arg(trailing_var_arg = true, allow_hyphen_values = true, num_args = 0..)] args: Vec, }, /// Introspect method signatures from the discovery document. /// - /// Requires --discovery or ANALYZER_DISCOVERY_URL to be set. + /// Path format: .. (e.g. "analyzer.scans.score.list"). + /// Uses the service registry by default; --discovery overrides. Schema { - /// Dotted path to introspect (e.g. "api.scans.score.list"). + /// Dotted path: ... path: String, }, - /// Generate skill files from the discovery document. + /// Generate skill files for all registered services. /// - /// Reads the discovery document and writes markdown skill files to `skills/`. 
- /// Requires --discovery or ANALYZER_DISCOVERY_URL to be set. + /// Fetches discovery documents from the service registry (or uses --discovery + /// override) and writes markdown skill files to `skills/`. GenerateSkills, } @@ -537,11 +541,28 @@ async fn run(cli: Cli) -> Result<()> { // -- Discovery-driven commands (agent mode) ----------------------- Command::Api { args } => { - let source = discovery::resolve_source(discovery_flag.as_deref())?; - let doc = discovery::load(&source).await?; + // First arg is the service name + let (service_name, rest_args) = args + .split_first() + .context("usage: analyzer api [flags]")?; + + // --discovery flag overrides registry lookup + let doc = if let Some(flag) = discovery_flag.as_deref() { + let source = discovery::resolve_source(Some(flag))?; + discovery::load(&source).await? + } else { + let entry = services::resolve_service(service_name).with_context(|| { + format!( + "unknown service '{service_name}'\n\nAvailable: {}", + services::list_aliases().join(", ") + ) + })?; + discovery::load_for_service(entry).await? 
+ }; + let api_cmd = api::build_api_command(&doc); let api_matches = match api_cmd - .try_get_matches_from(std::iter::once("api".to_string()).chain(args)) + .try_get_matches_from(std::iter::once("api".to_string()).chain(rest_args.iter().cloned())) { Ok(m) => m, Err(e) => { @@ -563,18 +584,53 @@ async fn run(cli: Cli) -> Result<()> { } Command::Schema { path } => { - let source = discovery::resolve_source(discovery_flag.as_deref())?; - let doc = discovery::load(&source).await?; - api::schema::handle_schema_command(&doc, &path) + // path = "analyzer.scans.create" — first segment is service name + let segments: Vec<&str> = path.splitn(2, '.').collect(); + let (service_name, rest_path) = match segments.as_slice() { + [svc, rest] => (*svc, *rest), + _ => bail!("path must be .."), + }; + + let doc = if let Some(flag) = discovery_flag.as_deref() { + let source = discovery::resolve_source(Some(flag))?; + discovery::load(&source).await? + } else { + let entry = services::resolve_service(service_name).with_context(|| { + format!( + "unknown service '{service_name}'\n\nAvailable: {}", + services::list_aliases().join(", ") + ) + })?; + discovery::load_for_service(entry).await? + }; + + // rest_path = "scans.create" — pass with "api." 
prefix for the resource tree + api::schema::handle_schema_command(&doc, &format!("api.{rest_path}")) } Command::GenerateSkills => { - let source = discovery::resolve_source(discovery_flag.as_deref())?; - let doc = discovery::load(&source).await?; let skills_dir = std::path::Path::new("skills"); - println!("Generating skills from discovery document..."); - api::generate_skills::generate(&doc, skills_dir)?; - println!("Done."); + println!( + "Generating skills for {} service(s)...", + services::SERVICES.len() + ); + for entry in services::SERVICES { + println!( + "\n Service: {} ({})", + entry.aliases[0], entry.api_name + ); + let doc = if let Some(flag) = discovery_flag.as_deref() { + // --discovery provided: use it (single service mode for dev) + let source = discovery::resolve_source(Some(flag))?; + discovery::load(&source).await? + } else { + discovery::load_for_service(entry).await? + }; + api::generate_skills::generate_for_service(&doc, entry, skills_dir)?; + } + api::generate_skills::generate_shared(skills_dir)?; + api::generate_skills::write_skills_index(skills_dir)?; + println!("\nDone."); Ok(()) } } diff --git a/src/services.rs b/src/services.rs new file mode 100644 index 0000000..cbff2a9 --- /dev/null +++ b/src/services.rs @@ -0,0 +1,72 @@ +/// Compile-time service registry. +/// +/// Each entry maps one or more CLI aliases to an API name, version, +/// and the URL where its Discovery Document can be fetched. 
+pub struct ServiceEntry { + pub aliases: &'static [&'static str], + pub api_name: &'static str, + pub version: &'static str, + pub discovery_url: &'static str, + pub description: &'static str, +} + +pub const SERVICES: &[ServiceEntry] = &[ + ServiceEntry { + aliases: &["analyzer"], + api_name: "analyzer-api-routes", + version: "0.5.0", + discovery_url: "https://analyzer.exein.dev/analyzer-discovery.json", + description: "Firmware and software image security analysis", + }, + // Future entries (same domain, different discovery files): + // ServiceEntry { + // aliases: &["isaac"], + // api_name: "isaac-api", + // version: "1.0.0", + // discovery_url: "https://analyzer.exein.dev/isaac-discovery.json", + // description: "Device identity and attestation", + // }, + // ServiceEntry { + // aliases: &["vuln-tracker"], + // api_name: "vulnerability-tracker-api", + // version: "1.0.0", + // discovery_url: "https://analyzer.exein.dev/vuln-tracker-discovery.json", + // description: "Vulnerability tracking and advisory management", + // }, +]; + +/// Resolve a service alias to its registry entry. +pub fn resolve_service(name: &str) -> Option<&'static ServiceEntry> { + SERVICES.iter().find(|s| s.aliases.contains(&name)) +} + +/// List all registered aliases (for help text and error messages). 
+pub fn list_aliases() -> Vec<&'static str> { + SERVICES + .iter() + .flat_map(|s| s.aliases.iter().copied()) + .collect() +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn resolve_known_service() { + let entry = resolve_service("analyzer").expect("should resolve"); + assert_eq!(entry.api_name, "analyzer-api-routes"); + assert_eq!(entry.version, "0.5.0"); + } + + #[test] + fn resolve_unknown_returns_none() { + assert!(resolve_service("nonexistent").is_none()); + } + + #[test] + fn list_aliases_includes_analyzer() { + let aliases = list_aliases(); + assert!(aliases.contains(&"analyzer")); + } +} From 894fa7fa76680f241fdfb908e5bd9056477b251e Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Tue, 17 Mar 2026 16:48:33 +0100 Subject: [PATCH 05/38] feat(dommyrock-analyzer-cli): update shared skill gen code and rename into generate_shared_skill --- README.md | 2 + skills/analyzer-health/SKILL.md | 29 +- skills/analyzer-objects/SKILL.md | 51 +-- skills/analyzer-scans/SKILL.md | 95 ++--- skills/analyzer-shared/SKILL.md | 72 ---- src/api/generate_skills.rs | 603 +++++++++++-------------------- 6 files changed, 254 insertions(+), 598 deletions(-) delete mode 100644 skills/analyzer-shared/SKILL.md diff --git a/README.md b/README.md index 7b7892c..4314391 100644 --- a/README.md +++ b/README.md @@ -273,6 +273,8 @@ Settings are resolved in this order (highest priority first): | `docker` | info, cve, password-hash, crypto, software-bom, malware, hardening, capabilities | | `idf` | info, cve, software-bom, symbols, tasks, stack-overflow | +--- + ## Agent mode The CLI includes a discovery-driven layer designed for AI agents. While the commands above are human-friendly (named flags, progress bars, `--wait`), the agent layer exposes the full API surface dynamically from a [Discovery Document](https://developers.google.com/discovery/v1/reference/apis) — no hardcoded commands. 
diff --git a/skills/analyzer-health/SKILL.md b/skills/analyzer-health/SKILL.md index 550a3e1..86e04a2 100644 --- a/skills/analyzer-health/SKILL.md +++ b/skills/analyzer-health/SKILL.md @@ -10,25 +10,17 @@ metadata: cliHelp: "analyzer api analyzer health --help" --- -# analyzer-health +# health (0.5.0) -> **PREREQUISITE:** Read `../shared/SKILL.md` for auth, global flags, and error handling. +> **PREREQUISITE:** Read `../shared/SKILL.md` for auth, global flags, and security rules. If missing, run `analyzer generate-skills` to create it. ```bash analyzer api analyzer health [flags] ``` -## API Methods +## API Resources - - `list` (GET) — Returns if the service is in an healthy state. - -## Flags - -| Flag | Purpose | -|------|---------| -| `--params ''` | Path and query parameters (e.g., `id`, `limit`, `page`) | -| `--json ''` | Request body for POST/PUT/PATCH methods | -| `--dry-run` | Print the request without executing | + - `list` — Returns if the service is in an healthy state. ## Discovering Commands @@ -43,16 +35,3 @@ analyzer schema analyzer.health. ``` Use `analyzer schema` output to build your `--params` and `--json` flags. - -## Examples - -```bash -# Returns if the service is in an healthy state. -analyzer api analyzer health list -``` - -## See Also - -- [shared](../shared/SKILL.md) — Global flags and auth -- [CONTEXT.md](../../CONTEXT.md) — Full agent reference - diff --git a/skills/analyzer-objects/SKILL.md b/skills/analyzer-objects/SKILL.md index a206668..7a3719a 100644 --- a/skills/analyzer-objects/SKILL.md +++ b/skills/analyzer-objects/SKILL.md @@ -10,33 +10,25 @@ metadata: cliHelp: "analyzer api analyzer objects --help" --- -# analyzer-objects +# objects (0.5.0) -> **PREREQUISITE:** Read `../shared/SKILL.md` for auth, global flags, and error handling. +> **PREREQUISITE:** Read `../shared/SKILL.md` for auth, global flags, and security rules. If missing, run `analyzer generate-skills` to create it. 
```bash analyzer api analyzer objects [flags] ``` -## API Methods +## API Resources - - `create` (POST) — Create new object - - `delete` (DELETE) — Deletes a object and all related scans. - - `get` (GET) — Retrieve an object by its ID. - - `list` (GET) — Retrieve a list of all objects of the current user. - - `update` (PUT) — Update an object + - `create` — Create new object + - `delete` — Deletes a object and all related scans. + - `get` — Retrieve an object by its ID. + - `list` — Retrieve a list of all objects of the current user. + - `update` — Update an object ### scans - - `list` (GET) — Those scans could be - -## Flags - -| Flag | Purpose | -|------|---------| -| `--params ''` | Path and query parameters (e.g., `id`, `limit`, `page`) | -| `--json ''` | Request body for POST/PUT/PATCH methods | -| `--dry-run` | Print the request without executing | + - `list` — Those scans could be ## Discovering Commands @@ -51,28 +43,3 @@ analyzer schema analyzer.objects. ``` Use `analyzer schema` output to build your `--params` and `--json` flags. - -## Examples - -```bash -# Create new object -analyzer api analyzer objects create --json '{...}' --dry-run - -# Deletes a object and all related scans. -analyzer api analyzer objects delete --params '{"id": "..."}' --dry-run - -# Retrieve an object by its ID. -analyzer api analyzer objects get --params '{"id": "..."}' - -# Retrieve a list of all objects of the current user. 
-analyzer api analyzer objects list - -# Update an object -analyzer api analyzer objects update --params '{"id": "..."}' --json '{...}' --dry-run -``` - -## See Also - -- [shared](../shared/SKILL.md) — Global flags and auth -- [CONTEXT.md](../../CONTEXT.md) — Full agent reference - diff --git a/skills/analyzer-scans/SKILL.md b/skills/analyzer-scans/SKILL.md index 0bb5008..060565b 100644 --- a/skills/analyzer-scans/SKILL.md +++ b/skills/analyzer-scans/SKILL.md @@ -10,92 +10,61 @@ metadata: cliHelp: "analyzer api analyzer scans --help" --- -# analyzer-scans +# scans (0.5.0) -> **PREREQUISITE:** Read `../shared/SKILL.md` for auth, global flags, and error handling. +> **PREREQUISITE:** Read `../shared/SKILL.md` for auth, global flags, and security rules. If missing, run `analyzer generate-skills` to create it. ```bash analyzer api analyzer scans [flags] ``` -## API Methods +## API Resources - - `create` (POST) — Schedule a new scan. - - `delete` (DELETE) — Delete a scan. - - `get` (GET) — Returns a scan. - - `list` (GET) — Retrieve a list of scans. + - `create` — Schedule a new scan. + - `delete` — Delete a scan. + - `get` — Returns a scan. + - `list` — Retrieve a list of scans. ### cancel - - `create` (POST) — This can be used to cancel an already pending or running scan. - -### compliance-check - -### cyber-resilience-act - - - `list` (GET) — Computes compliance with Cyber Resilience Act - -### ai-suggestion - -### begin - - - `create` (POST) — Triggers CRA AI suggestion using user-provided documents. - -### status - - - `list` (GET) — Returns status of the CRA AI suggestion. - -### overwrite - - - `overwrite_compliance_check_requirement` (PUT) — Overwrites compliance check requirement - -### report - - - `list` (GET) — Downloads Cyber Resilience Act compliance report as PDF + - `create` — This can be used to cancel an already pending or running scan. 
+ - `compliance-check` — Operations on the 'compliance-check' resource ### documents - - `create` (POST) — Upload a document for a scan. - - `delete` (DELETE) — Delete a single document for a scan. - - `delete_documents` (DELETE) — Delete all documents for a scan. - - `list` (GET) — List documents for a scan. + - `create` — Upload a document for a scan. + - `delete` — Delete a single document for a scan. + - `delete_documents` — Delete all documents for a scan. + - `list` — List documents for a scan. ### overview - - `get` (GET) — Returns an overview of one analysis. - - `list` (GET) — Returns an aggregated overview of all analysis executed for one scan. + - `get` — Returns an overview of one analysis. + - `list` — Returns an aggregated overview of all analysis executed for one scan. ### report - - `list` (GET) — Downloads a PDF security report for a scan. + - `list` — Downloads a PDF security report for a scan. ### results - - `get` (GET) — Retrieve the results of one specific analysis of a scan. + - `get` — Retrieve the results of one specific analysis of a scan. ### sbom - - `list` (GET) — Downloads the SBOM (CycloneDX JSON) for a scan. + - `list` — Downloads the SBOM (CycloneDX JSON) for a scan. ### score - - `list` (GET) — Returns a security score of all successful finished analyses with their individual scores included. + - `list` — Returns a security score of all successful finished analyses with their individual scores included. ### status - - `list` (GET) — Returns the status of a scan. + - `list` — Returns the status of a scan. ### types - - `list` (GET) — Returns a list of all available analysis types for each different image. - -## Flags - -| Flag | Purpose | -|------|---------| -| `--params ''` | Path and query parameters (e.g., `id`, `limit`, `page`) | -| `--json ''` | Request body for POST/PUT/PATCH methods | -| `--dry-run` | Print the request without executing | + - `list` — Returns a list of all available analysis types for each different image. 
## Discovering Commands @@ -110,25 +79,3 @@ analyzer schema analyzer.scans. ``` Use `analyzer schema` output to build your `--params` and `--json` flags. - -## Examples - -```bash -# Schedule a new scan. -analyzer api analyzer scans create --dry-run - -# Delete a scan. -analyzer api analyzer scans delete --params '{"id": "..."}' --dry-run - -# Returns a scan. -analyzer api analyzer scans get --params '{"id": "..."}' - -# Retrieve a list of scans. -analyzer api analyzer scans list -``` - -## See Also - -- [shared](../shared/SKILL.md) — Global flags and auth -- [CONTEXT.md](../../CONTEXT.md) — Full agent reference - diff --git a/skills/analyzer-shared/SKILL.md b/skills/analyzer-shared/SKILL.md deleted file mode 100644 index 041e656..0000000 --- a/skills/analyzer-shared/SKILL.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -name: analyzer-shared -version: 0.5.0 -description: "Analyzer CLI: Shared patterns for authentication, global flags, and error handling." -metadata: - openclaw: - category: "security" - requires: - bins: ["analyzer"] ---- - -# analyzer — Shared Reference - -## Authentication - -```bash -# Interactive login (prompts for API key, validates, saves) -analyzer login - -# Environment variable -export ANALYZER_API_KEY="your-api-key" -``` - -## Global Flags - -| Flag | Description | -|------|-------------| -| `--params ''` | Path and query parameters | -| `--json ''` | Request body for POST/PUT/PATCH | -| `--dry-run` | Validate and print request without executing | -| `--discovery ` | Discovery document location (also: `ANALYZER_DISCOVERY_URL` env var) | -| `--format ` | Output format: `human` (default), `json` | - -## CLI Syntax - -```bash -analyzer api [sub-resource] [flags] -``` - -## Schema Introspection - -Before calling any API method, inspect it: - -```bash -# Browse all resources -analyzer schema api - -# Inspect a specific method -analyzer schema api.scans.create - -# Browse a resource's methods -analyzer schema api.scans.compliance-check -``` - -Use 
`analyzer schema` output to build your `--params` and `--json` flags. - -## Security Rules - -- **Always** use `--dry-run` for mutating operations (create, update, delete) before actual execution -- **Always** confirm with user before executing write/delete commands -- Prefer `--fields` to limit response size and protect the context window -- Poll scan status — do not guess when a scan completes - -## Error Handling - -All errors are JSON on stderr with a non-zero exit code: - -```json -{"error": {"code": 404, "message": "Object not found"}} -``` - -Check the exit code: `0` = success, non-zero = failure. Parse the error JSON to decide next steps. Do not retry without understanding the error. diff --git a/src/api/generate_skills.rs b/src/api/generate_skills.rs index 856a600..3305085 100644 --- a/src/api/generate_skills.rs +++ b/src/api/generate_skills.rs @@ -5,16 +5,17 @@ //! API resource per service, plus a shared skill for global flags, auth, and //! error handling. //! -//! Follows the same pattern as the gws CLI's `generate-skills` command: -//! YAML frontmatter with `metadata.openclaw`, prerequisite notes, -//! "Discovering Commands" section, and a skills index. +//! Skills are intentionally minimal: method names + one-line descriptions only. +//! Agents use `analyzer schema ..` at runtime to +//! discover parameters, request/response schemas, and types — schema +//! introspection replaces static documentation. use std::fmt::Write as FmtWrite; use std::path::Path; use anyhow::{Context, Result}; -use crate::discovery::{DiscoveryDocument, DiscoveryMethod, DiscoveryResource}; +use crate::discovery::{DiscoveryDocument, DiscoveryResource}; use crate::services::{ServiceEntry, SERVICES}; struct SkillIndexEntry { @@ -57,18 +58,15 @@ pub fn generate_for_service( } /// Generate the shared skill file (service-agnostic: auth, flags, patterns). -/// -/// Lists all registered services, covers authentication, global flags, -/// schema introspection, and error handling. 
pub fn generate_shared(output_dir: &Path) -> Result<()> { std::fs::create_dir_all(output_dir) .with_context(|| format!("failed to create output directory: {}", output_dir.display()))?; let shared_dir = output_dir.join("shared"); std::fs::create_dir_all(&shared_dir)?; - let shared_content = render_shared_skill(); + let content = generate_shared_skill(); let shared_path = shared_dir.join("SKILL.md"); - std::fs::write(&shared_path, &shared_content) + std::fs::write(&shared_path, &content) .with_context(|| format!("failed to write {}", shared_path.display()))?; println!(" wrote {}", shared_path.display()); Ok(()) @@ -78,7 +76,6 @@ pub fn generate_shared(output_dir: &Path) -> Result<()> { pub fn write_skills_index(output_dir: &Path) -> Result<()> { let mut entries: Vec = Vec::new(); - // Collect all skill directories that contain a SKILL.md if output_dir.exists() { let mut dirs: Vec<_> = std::fs::read_dir(output_dir)? .filter_map(|e| e.ok()) @@ -91,7 +88,8 @@ pub fn write_skills_index(output_dir: &Path) -> Result<()> { let skill_file = entry.path().join("SKILL.md"); if skill_file.exists() { let description = if name == "shared" { - "Shared patterns for authentication, global flags, and error handling.".to_string() + "Shared patterns for authentication, global flags, and error handling." + .to_string() } else { format!("API operations for {name}.") }; @@ -131,10 +129,15 @@ pub fn write_skills_index(output_dir: &Path) -> Result<()> { } // --------------------------------------------------------------------------- -// Rendering +// Rendering — resource skills (minimal, gws pattern) // --------------------------------------------------------------------------- -/// Render a SKILL.md for a single top-level resource (e.g. "objects", "scans"). +/// Render a SKILL.md for a single top-level resource. +/// +/// Intentionally minimal: frontmatter, syntax, method names with descriptions, +/// and a "Discovering Commands" section pointing to schema introspection. 
+/// No examples, no flags tables, no parameter details — agents use +/// `analyzer schema` to discover those at runtime. fn render_resource_skill( service_alias: &str, name: &str, @@ -142,275 +145,141 @@ fn render_resource_skill( entry: &ServiceEntry, version: &str, ) -> String { - let mut out = String::new(); - let skill_name = format!("{service_alias}-{name}"); - // YAML frontmatter (matches gws pattern with metadata.openclaw) - writeln!(out, "---").unwrap(); - writeln!(out, "name: {skill_name}").unwrap(); - writeln!(out, "version: {version}").unwrap(); - writeln!( - out, - "description: \"Manage {name} via the {service_alias} API — {}\"", + let description = format!( + "Manage {name} via the {service_alias} API — {}", entry.description - ) - .unwrap(); - writeln!(out, "metadata:").unwrap(); - writeln!(out, " openclaw:").unwrap(); - writeln!(out, " category: \"security\"").unwrap(); - writeln!(out, " requires:").unwrap(); - writeln!(out, " bins: [\"analyzer\"]").unwrap(); - writeln!( - out, - " cliHelp: \"analyzer api {service_alias} {name} --help\"" - ) - .unwrap(); - writeln!(out, "---").unwrap(); - writeln!(out).unwrap(); + ); - // Title - writeln!(out, "# {skill_name}").unwrap(); - writeln!(out).unwrap(); + let mut out = format!( + r#"--- +name: {skill_name} +version: {version} +description: "{description}" +metadata: + openclaw: + category: "security" + requires: + bins: ["analyzer"] + cliHelp: "analyzer api {service_alias} {name} --help" +--- - // Prerequisite note (gws pattern) - writeln!( - out, - "> **PREREQUISITE:** Read `../shared/SKILL.md` for auth, global flags, and error handling." - ) - .unwrap(); - writeln!(out).unwrap(); +# {name} ({version}) - // Syntax - writeln!(out, "```bash").unwrap(); - writeln!( - out, - "analyzer api {service_alias} {name} [flags]" - ) - .unwrap(); - writeln!(out, "```").unwrap(); - writeln!(out).unwrap(); +> **PREREQUISITE:** Read `../shared/SKILL.md` for auth, global flags, and security rules. 
If missing, run `analyzer generate-skills` to create it. - // API Methods section - writeln!(out, "## API Methods").unwrap(); - writeln!(out).unwrap(); +```bash +analyzer api {service_alias} {name} [flags] +``` + +## API Resources + +"#, + ); // Direct methods on this resource - render_method_list(&mut out, name, &resource.methods); + render_methods(&mut out, &resource.methods); - // Sub-resources - render_sub_resources(&mut out, name, resource); + // Sub-resources (gws pattern: "name — Operations on the 'name' resource") + render_sub_resources(&mut out, resource); - // Flags section - writeln!(out, "## Flags").unwrap(); - writeln!(out).unwrap(); - writeln!(out, "| Flag | Purpose |").unwrap(); - writeln!(out, "|------|---------|").unwrap(); - writeln!( + // Discovering Commands — the key section that replaces bloated examples + write!( out, - "| `--params ''` | Path and query parameters (e.g., `id`, `limit`, `page`) |" - ) - .unwrap(); - writeln!( - out, - "| `--json ''` | Request body for POST/PUT/PATCH methods |" - ) - .unwrap(); - writeln!( - out, - "| `--dry-run` | Print the request without executing |" - ) - .unwrap(); - writeln!(out).unwrap(); + r#"## Discovering Commands - // Discovering Commands section (gws pattern) - writeln!(out, "## Discovering Commands").unwrap(); - writeln!(out).unwrap(); - writeln!(out, "Before calling any API method, inspect it:").unwrap(); - writeln!(out).unwrap(); - writeln!(out, "```bash").unwrap(); - writeln!(out, "# Browse resources and methods").unwrap(); - writeln!( - out, - "analyzer api {service_alias} {name} --help" - ) - .unwrap(); - writeln!(out).unwrap(); - writeln!( - out, - "# Inspect a method's required params, types, and defaults" - ) - .unwrap(); - writeln!( - out, - "analyzer schema {service_alias}.{name}." - ) - .unwrap(); - writeln!(out, "```").unwrap(); - writeln!(out).unwrap(); - writeln!( - out, - "Use `analyzer schema` output to build your `--params` and `--json` flags." 
- ) - .unwrap(); - writeln!(out).unwrap(); +Before calling any API method, inspect it: - // Examples section - writeln!(out, "## Examples").unwrap(); - writeln!(out).unwrap(); - render_examples(&mut out, service_alias, name, resource); +```bash +# Browse resources and methods +analyzer api {service_alias} {name} --help - // See Also - writeln!(out, "## See Also").unwrap(); - writeln!(out).unwrap(); - writeln!( - out, - "- [shared](../shared/SKILL.md) — Global flags and auth" - ) - .unwrap(); - writeln!( - out, - "- [CONTEXT.md](../../CONTEXT.md) — Full agent reference" +# Inspect a method's required params, types, and defaults +analyzer schema {service_alias}.{name}. +``` + +Use `analyzer schema` output to build your `--params` and `--json` flags. +"# ) .unwrap(); - writeln!(out).unwrap(); out } -/// Render methods as a list with descriptions. -fn render_method_list( +/// Render method names with one-line descriptions. +fn render_methods( out: &mut String, - _path_prefix: &str, - methods: &std::collections::BTreeMap, + methods: &std::collections::BTreeMap, ) { - if methods.is_empty() { - return; - } - for (method_name, method) in methods { - let desc = method - .description - .as_deref() - .unwrap_or("") - .lines() - .next() - .unwrap_or(""); - writeln!(out, " - `{method_name}` ({}) — {desc}", method.http_method).unwrap(); + let desc = truncate_desc( + method + .description + .as_deref() + .unwrap_or(""), + ); + writeln!(out, " - `{method_name}` — {desc}").unwrap(); } - writeln!(out).unwrap(); } -/// Render sub-resources recursively. -fn render_sub_resources(out: &mut String, path_prefix: &str, resource: &DiscoveryResource) { +/// Render sub-resources. Deep nesting is listed as a pointer, not fully expanded. 
+fn render_sub_resources(out: &mut String, resource: &DiscoveryResource) { for (sub_name, sub_resource) in &resource.resources { - let sub_path = format!("{path_prefix}.{sub_name}"); - - writeln!(out, "### {sub_name}").unwrap(); - writeln!(out).unwrap(); - - // Methods on this sub-resource - render_method_list(out, &sub_path, &sub_resource.methods); - - // Recurse into nested sub-resources - render_sub_resources(out, &sub_path, sub_resource); - } -} - -/// Render examples dynamically from the resource's direct methods. -fn render_examples( - out: &mut String, - service_alias: &str, - name: &str, - resource: &DiscoveryResource, -) { - if resource.methods.is_empty() { - return; - } - - writeln!(out, "```bash").unwrap(); - let mut first = true; - for (method_name, method) in &resource.methods { - if !first { + if sub_resource.methods.is_empty() && !sub_resource.resources.is_empty() { + // Container resource — just list it as a pointer + writeln!( + out, + " - `{sub_name}` — Operations on the '{sub_name}' resource" + ) + .unwrap(); + } else { writeln!(out).unwrap(); + writeln!(out, "### {sub_name}").unwrap(); + writeln!(out).unwrap(); + render_methods(out, &sub_resource.methods); + // Nested sub-resources listed as pointers + for nested_name in sub_resource.resources.keys() { + writeln!( + out, + " - `{nested_name}` — Operations on the '{nested_name}' resource" + ) + .unwrap(); + } } - first = false; - - // Comment from description - let desc = method - .description - .as_deref() - .and_then(|d| d.lines().next()) - .filter(|l| !l.is_empty()) - .unwrap_or(method_name); - writeln!(out, "# {desc}").unwrap(); - - // Command base - write!(out, "analyzer api {service_alias} {name} {method_name}").unwrap(); - - // --params with actual path parameter names - let path_params: Vec<&str> = method - .parameters - .iter() - .filter(|(_, p)| p.location == "path") - .map(|(n, _)| n.as_str()) - .collect(); - if !path_params.is_empty() { - let pairs: Vec = path_params - .iter() - 
.map(|n| format!("\"{n}\": \"...\"")) - .collect(); - write!(out, " --params '{{{}}}'", pairs.join(", ")).unwrap(); - } - - // --json if method has a request body - if method.request.is_some() { - write!(out, " --json '{{...}}'").unwrap(); - } - - // --dry-run for mutating verbs - match method.http_method.as_str() { - "POST" | "PUT" | "PATCH" | "DELETE" => write!(out, " --dry-run").unwrap(), - _ => {} - } - - writeln!(out).unwrap(); } - writeln!(out, "```").unwrap(); writeln!(out).unwrap(); } -/// Render the shared SKILL.md covering global flags, auth, and error handling. -/// -/// Lists all registered services with their descriptions. -fn render_shared_skill() -> String { - let mut out = String::new(); - - writeln!(out, "---").unwrap(); - writeln!(out, "name: shared").unwrap(); - writeln!( - out, - "description: \"Analyzer CLI: Shared patterns for authentication, global flags, and error handling.\"" - ).unwrap(); - writeln!(out, "metadata:").unwrap(); - writeln!(out, " openclaw:").unwrap(); - writeln!(out, " category: \"security\"").unwrap(); - writeln!(out, " requires:").unwrap(); - writeln!(out, " bins: [\"analyzer\"]").unwrap(); - writeln!(out, "---").unwrap(); - writeln!(out).unwrap(); +/// Truncate a description to its first sentence/line, max 120 chars. +fn truncate_desc(desc: &str) -> String { + let first_line = desc.lines().next().unwrap_or(""); + // Cut at first sentence end if within limit + let truncated = first_line + .find(". 
") + .map(|i| &first_line[..=i]) + .unwrap_or(first_line); + if truncated.len() > 120 { + format!("{}...", &truncated[..117]) + } else { + truncated.to_string() + } +} - writeln!(out, "# analyzer — Shared Reference").unwrap(); - writeln!(out).unwrap(); +// --------------------------------------------------------------------------- +// Rendering — shared skill (raw string for readability) +// --------------------------------------------------------------------------- - // Registered services - writeln!(out, "## Registered Services").unwrap(); - writeln!(out).unwrap(); - writeln!(out, "| Alias | API | Description |").unwrap(); - writeln!(out, "|-------|-----|-------------|").unwrap(); +/// Generate the shared SKILL.md using raw strings for readability. +/// +/// Covers registered services, authentication, global flags, CLI syntax, +/// schema introspection, security rules, and error handling. +fn generate_shared_skill() -> String { + // Build the services table dynamically + let mut services_table = String::new(); for entry in SERVICES { writeln!( - out, + services_table, "| `{}` | {} | {} |", entry.aliases.join(", "), entry.api_name, @@ -418,153 +287,117 @@ fn render_shared_skill() -> String { ) .unwrap(); } - writeln!(out).unwrap(); - writeln!(out, "## Authentication").unwrap(); - writeln!(out).unwrap(); - writeln!(out, "```bash").unwrap(); - writeln!( - out, - "# Interactive login (prompts for API key, validates, saves)" - ) - .unwrap(); - writeln!(out, "analyzer login").unwrap(); - writeln!(out).unwrap(); - writeln!(out, "# Environment variable").unwrap(); - writeln!(out, "export ANALYZER_API_KEY=\"your-api-key\"").unwrap(); - writeln!(out, "```").unwrap(); - writeln!(out).unwrap(); + format!( + r#"--- +name: shared +description: "Analyzer CLI: Shared patterns for authentication, global flags, and error handling." 
+metadata: + openclaw: + category: "security" + requires: + bins: ["analyzer"] +--- - writeln!(out, "## Global Flags").unwrap(); - writeln!(out).unwrap(); - writeln!(out, "| Flag | Description |").unwrap(); - writeln!(out, "|------|-------------|").unwrap(); - writeln!( - out, - "| `--params ''` | Path and query parameters |" - ) - .unwrap(); - writeln!( - out, - "| `--json ''` | Request body for POST/PUT/PATCH |" - ) - .unwrap(); - writeln!( - out, - "| `--dry-run` | Validate and print request without executing |" - ) - .unwrap(); - writeln!( - out, - "| `--discovery ` | Override discovery document (dev/testing) |" - ) - .unwrap(); - writeln!( - out, - "| `--format ` | Output format: `human` (default), `json` |" - ) - .unwrap(); - writeln!(out).unwrap(); +# analyzer — Shared Reference - writeln!(out, "## CLI Syntax").unwrap(); - writeln!(out).unwrap(); - writeln!(out, "```bash").unwrap(); - writeln!( - out, - "# API commands (service name is first positional arg)" - ) - .unwrap(); - writeln!( - out, - "analyzer api [sub-resource] [flags]" - ) - .unwrap(); - writeln!(out).unwrap(); - writeln!( - out, - "# Schema introspection (service name is first dotted segment)" - ) - .unwrap(); - writeln!( - out, - "analyzer schema .." 
- ) - .unwrap(); - writeln!(out).unwrap(); - writeln!(out, "# Generate skills for all services").unwrap(); - writeln!(out, "analyzer generate-skills").unwrap(); - writeln!(out, "```").unwrap(); - writeln!(out).unwrap(); +## Registered Services - writeln!(out, "## Schema Introspection").unwrap(); - writeln!(out).unwrap(); - writeln!(out, "Before calling any API method, inspect it:").unwrap(); - writeln!(out).unwrap(); - writeln!(out, "```bash").unwrap(); - writeln!(out, "# Browse all resources for a service").unwrap(); - writeln!(out, "analyzer schema analyzer.api").unwrap(); - writeln!(out).unwrap(); - writeln!(out, "# Inspect a specific method").unwrap(); - writeln!(out, "analyzer schema analyzer.scans.create").unwrap(); - writeln!(out).unwrap(); - writeln!(out, "# Browse a resource's methods").unwrap(); - writeln!( - out, - "analyzer schema analyzer.scans.compliance-check" - ) - .unwrap(); - writeln!(out, "```").unwrap(); - writeln!(out).unwrap(); - writeln!( - out, - "Use `analyzer schema` output to build your `--params` and `--json` flags." 
- ) - .unwrap(); - writeln!(out).unwrap(); +| Alias | API | Description | +|-------|-----|-------------| +{services_table} +## Authentication - writeln!(out, "## Security Rules").unwrap(); - writeln!(out).unwrap(); - writeln!( - out, - "- **Always** use `--dry-run` for mutating operations (create, update, delete) before actual execution" - ).unwrap(); - writeln!( - out, - "- **Always** confirm with user before executing write/delete commands" - ) - .unwrap(); - writeln!( - out, - "- Prefer `--fields` to limit response size and protect the context window" - ) - .unwrap(); - writeln!( - out, - "- Poll scan status — do not guess when a scan completes" - ) - .unwrap(); - writeln!(out).unwrap(); +```bash +# Interactive login (prompts for API key, validates, saves) +analyzer login - writeln!(out, "## Error Handling").unwrap(); - writeln!(out).unwrap(); - writeln!( - out, - "All errors are JSON on stderr with a non-zero exit code:" - ) - .unwrap(); - writeln!(out).unwrap(); - writeln!(out, "```json").unwrap(); - writeln!( - out, - "{{\"error\": {{\"code\": 404, \"message\": \"Object not found\"}}}}" +# Environment variable +export ANALYZER_API_KEY="your-api-key" +``` + +## Global Flags + +| Flag | Description | +|------|-------------| +| `--params ''` | Path and query parameters | +| `--json ''` | Request body for POST/PUT/PATCH | +| `--dry-run` | Validate and print request without executing | +| `--discovery ` | Override discovery document (dev/testing) | +| `--format ` | Output format: `human` (default), `json` | + +## CLI Syntax + +```bash +# API commands (service name is first positional arg) +analyzer api [sub-resource] [flags] + +# Schema introspection (service name is first dotted segment) +analyzer schema .. 
+ +# Generate skills for all services +analyzer generate-skills +``` + +## Schema Introspection + +Before calling any API method, inspect it: + +```bash +# Browse all resources for a service +analyzer schema analyzer.api + +# Inspect a specific method +analyzer schema analyzer.scans.create + +# Browse a resource's methods +analyzer schema analyzer.scans.compliance-check +``` + +Use `analyzer schema` output to build your `--params` and `--json` flags. + +## Security Rules + +- **Always** use `--dry-run` for mutating operations (create, update, delete) before actual execution +- **Always** confirm with user before executing write/delete commands +- Prefer `--fields` to limit response size and protect the context window +- Poll scan status — do not guess when a scan completes + +## Error Handling + +All errors are JSON on stderr with a non-zero exit code: + +```json +{{"error": {{"code": 404, "message": "Object not found"}}}} +``` + +Check the exit code: `0` = success, non-zero = failure. Parse the error JSON to decide next steps. Do not retry without understanding the error. +"# ) - .unwrap(); - writeln!(out, "```").unwrap(); - writeln!(out).unwrap(); - writeln!( - out, - "Check the exit code: `0` = success, non-zero = failure. Parse the error JSON to decide next steps. Do not retry without understanding the error." - ).unwrap(); +} - out +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn truncate_short_desc() { + assert_eq!(truncate_desc("Returns a scan."), "Returns a scan."); + } + + #[test] + fn truncate_multi_sentence() { + assert_eq!( + truncate_desc("Returns a scan. This includes all metadata and results."), + "Returns a scan." 
+ ); + } + + #[test] + fn truncate_long_desc() { + let long = "a".repeat(200); + let result = truncate_desc(&long); + assert!(result.len() <= 120); + assert!(result.ends_with("...")); + } } From 56bcaa311d0ad887931c87e149cc0b7bd4d60695 Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Tue, 17 Mar 2026 17:03:16 +0100 Subject: [PATCH 06/38] feat(dommyrock-analyzer-cli): add to Limit response fields (context window protection) --- CONTEXT.md | 199 ++++++++----------------------------- docs/skills.md | 1 - skills/shared/SKILL.md | 1 + src/api/executor.rs | 78 ++++++++++++++- src/api/generate_skills.rs | 1 + src/api/mod.rs | 9 ++ 6 files changed, 125 insertions(+), 164 deletions(-) diff --git a/CONTEXT.md b/CONTEXT.md index d101a6e..3da2632 100644 --- a/CONTEXT.md +++ b/CONTEXT.md @@ -7,209 +7,92 @@ The `analyzer` CLI provides dynamic access to firmware and software security API | Alias | Description | |-------|-------------| | `analyzer` | Firmware and software image security analysis | - - ## Rules of Engagement for Agents -* **Schema Discovery:** *If you don't know the exact JSON payload structure, run `analyzer schema ..` first to inspect the schema before executing.* -* **Context Window Protection:** *Scan results and overview responses can be large. ALWAYS use `--fields` when listing or getting resources to avoid overwhelming your context window.* +* **Schema First:** *If you don't know the exact JSON payload structure, run `analyzer schema ..` first to inspect the schema before executing.* +* **Context Window Protection:** *API responses can be large. ALWAYS use `--fields` when listing or getting resources to avoid overwhelming your context window.* * **Dry-Run Safety:** *Always use the `--dry-run` flag for mutating operations (create, update, delete) to validate your JSON payload before actual execution.* -* **Poll, Don't Guess:** *After scheduling a scan, poll `analyzer api analyzer scans status list` until it completes. 
Do not assume timing or make further requests against incomplete scans.* -* **One Step at a Time:** *Verify each step succeeded (exit code 0, valid JSON response) before proceeding to the next.* +* **Poll, Don't Guess:** *After scheduling a scan, poll the status endpoint until it completes. Do not assume timing or make further requests against incomplete scans.* ## Core Syntax -### Multi-API commands (service name is first positional arg) - ```bash +# API commands (service name is first positional arg) analyzer api [sub-resource...] [flags] -``` - -### Schema introspection (service name is first dotted segment) -```bash +# Schema introspection (service name is first dotted segment) analyzer schema .. -``` - -### Generate skills for all registered services - -```bash -analyzer generate-skills -``` -### Navigation / help - -```bash -analyzer --help +# Navigation analyzer api analyzer --help -analyzer api analyzer objects --help analyzer api analyzer scans --help -analyzer api analyzer scans overview --help -analyzer api analyzer scans compliance-check cyber-resilience-act --help ``` ### Key Flags -- `--params ''`: Path and query parameters (e.g., `id`, `limit`, `page`, `per-page`, `query`). -- `--json ''`: Request body for POST/PUT/PATCH methods. Must match the schema exactly. -- `--fields ''`: Limits the response fields (critical for AI context window efficiency). -- `--dry-run`: Validates and prints the request without executing. Use before every mutation. -- `--output json|table`: Output format. Default: `json`. Agents should always use `json`. -- `--discovery `: Override the discovery source for dev/testing (bypasses service registry). 
+| Flag | Purpose | +|------|---------| +| `--params ''` | Path and query parameters (e.g., `{"id": "..."}`) | +| `--json ''` | Request body for POST/PUT/PATCH methods | +| `--fields ''` | Comma-separated response fields to include (context window protection) | +| `--dry-run` | Validate and print the request without executing | +| `--format human\|json\|table` | Output format (default: `human`) | +| `--discovery ` | Override discovery source for dev/testing | -## Usage Patterns +## Schema Introspection -### 1. Reading Data (GET/LIST) - -Always use `--fields` to minimize tokens. +The CLI is self-documenting. Use `analyzer schema` to discover parameters, request/response schemas, and types at runtime — no static docs needed. ```bash -# List objects with field mask -analyzer api analyzer objects list --params '{"limit": 10}' --fields "id,name,tags" - -# Get a single object -analyzer api analyzer objects get --params '{"id": "OBJ_ID"}' +# Browse all resources for a service +analyzer schema analyzer.api -# Get scan details -analyzer api analyzer scans get --params '{"id": "SCAN_ID"}' +# Inspect a method's params, types, and defaults +analyzer schema analyzer.scans.create +analyzer schema analyzer.objects.get -# Check service health -analyzer api analyzer health list +# Browse a sub-resource tree +analyzer schema analyzer.scans.compliance-check ``` -### 2. Writing Data (POST/PUT/PATCH) +Use `analyzer schema` output to build your `--params` and `--json` flags. -Use `--json` for the request body. Always `--dry-run` first. 
- -```bash -# Create an object (firmware image) -analyzer api analyzer objects create --json '{"name": "Router FW v2.1", "description": "Edge router firmware", "tags": ["router", "v2.1"]}' --dry-run - -# Update an object -analyzer api analyzer objects update --params '{"id": "OBJ_ID"}' --json '{"name": "Router FW v2.1.1", "favorite": true}' --dry-run - -# Schedule a new scan -analyzer api analyzer scans create --json '{...}' --dry-run -``` +## Usage Patterns -### 3. Deleting Data +### Reading Data -Always `--dry-run` first. Deletions are irreversible. +Always use `--fields` to minimize tokens. ```bash -# Delete an object and all its scans -analyzer api analyzer objects delete --params '{"id": "OBJ_ID"}' --dry-run - -# Delete a scan -analyzer api analyzer scans delete --params '{"id": "SCAN_ID"}' --dry-run +# List objects (efficient) +analyzer api analyzer objects list --params '{"limit": 10}' --fields "id,name,tags" -# Delete a single document -analyzer api analyzer scans documents delete --params '{"id": "SCAN_ID", "file_name": "datasheet.pdf"}' --dry-run +# Get scan details +analyzer api analyzer scans get --params '{"id": "SCAN_ID"}' --fields "id,status,score" -# Delete all documents for a scan -analyzer api analyzer scans documents delete_documents --params '{"id": "SCAN_ID"}' --dry-run +# Check service health +analyzer api analyzer health list ``` -### 4. Schema Introspection +### Writing Data -If unsure about parameters or body structure, check the schema: +Use `--json` for the request body. Always `--dry-run` first. ```bash analyzer schema analyzer.objects.create -analyzer schema analyzer.scans.create -analyzer schema analyzer.scans.results.get -analyzer schema analyzer.scans.compliance-check.cyber-resilience-act.list -``` - -### 5. Scan Lifecycle (Poll-Based) - -Scans are asynchronous. Schedule, poll, then retrieve results. 
- -```bash -# Schedule -analyzer api analyzer scans create --json '{...}' - -# Poll status until complete -analyzer api analyzer scans status list --params '{"id": "SCAN_ID"}' -# → {"id": "SCAN_ID", "status": "running"} -# → {"id": "SCAN_ID", "status": "finished"} - -# Cancel if needed -analyzer api analyzer scans cancel create --params '{"id": "SCAN_ID"}' -``` - -### 6. Scan Results and Scoring - -```bash -# Aggregated overview (CVE, hardening, crypto, SBOM, malware, kernel, etc.) -analyzer api analyzer scans overview list --params '{"id": "SCAN_ID"}' - -# Single analysis overview -analyzer api analyzer scans overview get --params '{"scan_id": "SCAN_ID", "analysis_id": "ANALYSIS_ID"}' - -# Security score -analyzer api analyzer scans score list --params '{"id": "SCAN_ID"}' - -# Detailed findings with pagination and filtering -analyzer api analyzer scans results get --params '{"scan_id": "SCAN_ID", "analysis_id": "cve", "page": 1, "per-page": 50}' - -# SBOM export -analyzer api analyzer scans sbom list --params '{"id": "SCAN_ID"}' - -# Full report download -analyzer api analyzer scans report list --params '{"id": "SCAN_ID"}' +analyzer api analyzer objects create --json '{"name": "Router FW v2.1"}' --dry-run ``` -### 7. 
Compliance — Cyber Resilience Act (CRA) +### Deleting Data ```bash -# Get CRA compliance report -analyzer api analyzer scans compliance-check cyber-resilience-act list --params '{"id": "SCAN_ID"}' - -# Download CRA report as PDF -analyzer api analyzer scans compliance-check cyber-resilience-act report list --params '{"id": "SCAN_ID"}' - -# Overwrite a requirement (manual assessment) -analyzer api analyzer scans compliance-check cyber-resilience-act overwrite overwrite_compliance_check_requirement \ - --params '{"id": "SCAN_ID"}' --json '{...}' --dry-run - -# Trigger AI suggestion -analyzer api analyzer scans compliance-check cyber-resilience-act ai-suggestion begin create --params '{"id": "SCAN_ID"}' - -# Poll AI suggestion status -analyzer api analyzer scans compliance-check cyber-resilience-act ai-suggestion status list --params '{"id": "SCAN_ID"}' -``` - -### 8. Documents (Scan Attachments) - -```bash -# List documents for a scan -analyzer api analyzer scans documents list --params '{"id": "SCAN_ID"}' - -# Upload a document -analyzer api analyzer scans documents create --params '{"id": "SCAN_ID"}' - -# Delete a single document -analyzer api analyzer scans documents delete --params '{"id": "SCAN_ID", "file_name": "FILE"}' --dry-run - -# Delete all documents -analyzer api analyzer scans documents delete_documents --params '{"id": "SCAN_ID"}' --dry-run -``` - -### 9. 
Dev/Testing with Local Discovery Document - -```bash -# Override discovery source — bypasses service registry -analyzer --discovery ./analyzer-discovery.json api analyzer scans list - -# Or with a URL -analyzer --discovery https://staging.example.com/discovery.json api analyzer scans list +analyzer api analyzer objects delete --params '{"id": "OBJ_ID"}' --dry-run ``` -## Human-Friendly Commands (Unchanged) +## Human-Friendly Commands -The classic CLI subcommands work the same as before: +Classic CLI subcommands (unchanged by multi-service routing): ```bash analyzer login @@ -227,4 +110,4 @@ All errors are JSON on stderr with a non-zero exit code: {"error": {"code": 404, "message": "Object not found"}} ``` -Check the exit code: `0` = success, non-zero = failure. Parse the error JSON to decide next steps. Do not retry without understanding the error. +Exit `0` = success, non-zero = failure. Parse error JSON to decide next steps. diff --git a/docs/skills.md b/docs/skills.md index c825afd..0189f57 100644 --- a/docs/skills.md +++ b/docs/skills.md @@ -7,6 +7,5 @@ | [analyzer-health](../skills/analyzer-health/SKILL.md) | API operations for analyzer-health. | | [analyzer-objects](../skills/analyzer-objects/SKILL.md) | API operations for analyzer-objects. | | [analyzer-scans](../skills/analyzer-scans/SKILL.md) | API operations for analyzer-scans. | -| [analyzer-shared](../skills/analyzer-shared/SKILL.md) | API operations for analyzer-shared. | | [shared](../skills/shared/SKILL.md) | Shared patterns for authentication, global flags, and error handling. 
| diff --git a/skills/shared/SKILL.md b/skills/shared/SKILL.md index 4b52578..8cbb11a 100644 --- a/skills/shared/SKILL.md +++ b/skills/shared/SKILL.md @@ -32,6 +32,7 @@ export ANALYZER_API_KEY="your-api-key" |------|-------------| | `--params ''` | Path and query parameters | | `--json ''` | Request body for POST/PUT/PATCH | +| `--fields ''` | Limit response fields (context window protection) | | `--dry-run` | Validate and print request without executing | | `--discovery ` | Override discovery document (dev/testing) | | `--format ` | Output format: `human` (default), `json` | diff --git a/src/api/executor.rs b/src/api/executor.rs index 5cdd0fc..fb61e2b 100644 --- a/src/api/executor.rs +++ b/src/api/executor.rs @@ -4,6 +4,7 @@ //! HTTP requests through the existing [`AnalyzerClient`]. use anyhow::{Context, Result}; +use serde_json::Value; use crate::client::AnalyzerClient; use crate::discovery::DiscoveryMethod; @@ -12,15 +13,18 @@ use crate::output::Format; /// Execute a single discovery method against the API. /// /// `client` is `None` when `dry_run` is true (no auth required). +/// `fields` is an optional comma-separated list of response field names +/// to keep (client-side filtering for context window protection). 
pub async fn execute_method( client: Option<&AnalyzerClient>, method: &DiscoveryMethod, params_json: Option<&str>, body_json: Option<&str>, + fields: Option<&str>, dry_run: bool, _format: Format, ) -> Result<()> { - let params: serde_json::Map = match params_json { + let params: serde_json::Map = match params_json { Some(s) => serde_json::from_str(s).context("invalid --params JSON")?, None => serde_json::Map::new(), }; @@ -32,7 +36,7 @@ pub async fn execute_method( let value = params .get(name) .map(|v| match v { - serde_json::Value::String(s) => s.clone(), + Value::String(s) => s.clone(), other => other.to_string().trim_matches('"').to_string(), }) .or_else(|| param_def.default.clone()) @@ -47,7 +51,7 @@ pub async fn execute_method( if param_def.location == "query" { if let Some(value) = params.get(name) { let val_str = match value { - serde_json::Value::String(s) => s.clone(), + Value::String(s) => s.clone(), other => other.to_string(), }; query_params.push((name.clone(), val_str)); @@ -56,11 +60,14 @@ pub async fn execute_method( } // Parse request body - let body: Option = match body_json { + let body: Option = match body_json { Some(s) => Some(serde_json::from_str(s).context("invalid --json body")?), None => None, }; + // Parse field mask + let field_list: Option> = fields.map(|f| f.split(',').map(str::trim).collect()); + if dry_run { let base = client.map(|c| c.base_url().as_str()).unwrap_or("/"); println!("{} {base}{url_path}", method.http_method); @@ -69,6 +76,9 @@ pub async fn execute_method( println!(" ?{k}={v}"); } } + if let Some(ref fl) = field_list { + println!(" Fields: {}", fl.join(", ")); + } if let Some(b) = &body { println!("{}", serde_json::to_string_pretty(b)?); } @@ -80,6 +90,64 @@ pub async fn execute_method( .execute_raw(&method.http_method, &url_path, &query_params, body.as_ref()) .await?; - println!("{}", serde_json::to_string_pretty(&response)?); + let output = match field_list { + Some(fl) => filter_fields(response, &fl), + None => 
response, + }; + + println!("{}", serde_json::to_string_pretty(&output)?); Ok(()) } + +/// Client-side field filtering for context window protection. +/// +/// Keeps only the specified top-level keys from JSON objects. +/// Arrays are filtered element-wise. Primitives pass through unchanged. +fn filter_fields(value: Value, fields: &[&str]) -> Value { + match value { + Value::Object(map) => { + let filtered: serde_json::Map<_, _> = map + .into_iter() + .filter(|(k, _)| fields.contains(&k.as_str())) + .collect(); + Value::Object(filtered) + } + Value::Array(arr) => { + Value::Array(arr.into_iter().map(|v| filter_fields(v, fields)).collect()) + } + other => other, + } +} + +#[cfg(test)] +mod tests { + use super::*; + use serde_json::json; + + #[test] + fn filter_fields_object() { + let input = json!({"id": "abc", "name": "test", "score": 72, "tags": []}); + let result = filter_fields(input, &["id", "name"]); + assert_eq!(result, json!({"id": "abc", "name": "test"})); + } + + #[test] + fn filter_fields_array() { + let input = json!([ + {"id": "1", "name": "a", "extra": true}, + {"id": "2", "name": "b", "extra": false} + ]); + let result = filter_fields(input, &["id", "name"]); + assert_eq!( + result, + json!([{"id": "1", "name": "a"}, {"id": "2", "name": "b"}]) + ); + } + + #[test] + fn filter_fields_primitive_passthrough() { + let input = json!("hello"); + let result = filter_fields(input.clone(), &["id"]); + assert_eq!(result, input); + } +} diff --git a/src/api/generate_skills.rs b/src/api/generate_skills.rs index 3305085..7a9923c 100644 --- a/src/api/generate_skills.rs +++ b/src/api/generate_skills.rs @@ -322,6 +322,7 @@ export ANALYZER_API_KEY="your-api-key" |------|-------------| | `--params ''` | Path and query parameters | | `--json ''` | Request body for POST/PUT/PATCH | +| `--fields ''` | Limit response fields (context window protection) | | `--dry-run` | Validate and print request without executing | | `--discovery ` | Override discovery document 
(dev/testing) | | `--format ` | Output format: `human` (default), `json` | diff --git a/src/api/mod.rs b/src/api/mod.rs index aab6901..2e3cab3 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -45,6 +45,12 @@ pub fn build_api_command(doc: &DiscoveryDocument) -> clap::Command { .action(clap::ArgAction::SetTrue) .help("Print the request without sending it") .global(true), + ) + .arg( + clap::Arg::new("fields") + .long("fields") + .help("Comma-separated response fields to include (e.g. \"id,name,score\")") + .global(true), ); cmd = add_resource_subcommands(cmd, api_resource); @@ -97,6 +103,7 @@ pub async fn dispatch( let params_json = get_global_arg(leaf_matches, matches, "params"); let body_json = get_global_arg(leaf_matches, matches, "json"); + let fields = get_global_arg(leaf_matches, matches, "fields"); let dry_run = leaf_matches.get_flag("dry-run") || matches.get_flag("dry-run"); if dry_run { @@ -105,6 +112,7 @@ pub async fn dispatch( method, params_json.as_deref(), body_json.as_deref(), + fields.as_deref(), true, format, ) @@ -118,6 +126,7 @@ pub async fn dispatch( method, params_json.as_deref(), body_json.as_deref(), + fields.as_deref(), false, format, ) From 109ae11acd4d4936b8469e12a2eefc7bdb345107 Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Tue, 17 Mar 2026 18:15:54 +0100 Subject: [PATCH 07/38] feat(dommyrock-analyzer-cli): add --paginate-all flag to not overload model context for longer reponses --- .gitignore | 1 + CONTEXT.md | 11 ++++ skills/shared/SKILL.md | 1 + src/api/executor.rs | 101 +++++++++++++++++++++++++++++++++---- src/api/generate_skills.rs | 1 + src/api/mod.rs | 38 ++++++++++++++ src/client/mod.rs | 19 +++++++ 7 files changed, 163 insertions(+), 9 deletions(-) diff --git a/.gitignore b/.gitignore index 96ef6c0..c225fd1 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ /target Cargo.lock +usage_examples.txt \ No newline at end of file diff --git a/CONTEXT.md b/CONTEXT.md index 3da2632..961f11a 100644 --- a/CONTEXT.md +++ 
b/CONTEXT.md @@ -36,6 +36,7 @@ analyzer api analyzer scans --help | `--params ''` | Path and query parameters (e.g., `{"id": "..."}`) | | `--json ''` | Request body for POST/PUT/PATCH methods | | `--fields ''` | Comma-separated response fields to include (context window protection) | +| `--page-all` | Auto-paginate results as NDJSON (one JSON line per page) | | `--dry-run` | Validate and print the request without executing | | `--format human\|json\|table` | Output format (default: `human`) | | `--discovery ` | Override discovery source for dev/testing | @@ -84,6 +85,16 @@ analyzer schema analyzer.objects.create analyzer api analyzer objects create --json '{"name": "Router FW v2.1"}' --dry-run ``` +### Pagination (NDJSON) + +Use `--page-all` for large result sets. Output is one JSON line per page. + +```bash +analyzer api analyzer scans results get \ + --params '{"scan_id": "SCAN_ID", "analysis_id": "cve"}' \ + --page-all --fields "id,severity,score" +``` + ### Deleting Data ```bash diff --git a/skills/shared/SKILL.md b/skills/shared/SKILL.md index 8cbb11a..6dd7799 100644 --- a/skills/shared/SKILL.md +++ b/skills/shared/SKILL.md @@ -33,6 +33,7 @@ export ANALYZER_API_KEY="your-api-key" | `--params ''` | Path and query parameters | | `--json ''` | Request body for POST/PUT/PATCH | | `--fields ''` | Limit response fields (context window protection) | +| `--page-all` | Auto-paginate results as NDJSON | | `--dry-run` | Validate and print request without executing | | `--discovery ` | Override discovery document (dev/testing) | | `--format ` | Output format: `human` (default), `json` | diff --git a/src/api/executor.rs b/src/api/executor.rs index fb61e2b..7d5244a 100644 --- a/src/api/executor.rs +++ b/src/api/executor.rs @@ -2,6 +2,7 @@ //! //! Substitutes path parameters, collects query parameters, and dispatches //! HTTP requests through the existing [`AnalyzerClient`]. +//! Supports `--page-all` for auto-pagination with NDJSON output. 
use anyhow::{Context, Result}; use serde_json::Value; @@ -10,17 +11,40 @@ use crate::client::AnalyzerClient; use crate::discovery::DiscoveryMethod; use crate::output::Format; +/// Pagination configuration from `--page-all`, `--page-limit`, `--page-delay`. +pub struct PaginationConfig { + /// Whether to auto-paginate through all pages. + pub page_all: bool, + /// Maximum number of pages to fetch (default: 10). + pub page_limit: u32, + /// Delay between page fetches in milliseconds (default: 100). + pub page_delay_ms: u64, +} + +impl Default for PaginationConfig { + fn default() -> Self { + Self { + page_all: false, + page_limit: 10, + page_delay_ms: 100, + } + } +} + /// Execute a single discovery method against the API. /// /// `client` is `None` when `dry_run` is true (no auth required). /// `fields` is an optional comma-separated list of response field names /// to keep (client-side filtering for context window protection). +/// `pagination` controls auto-pagination with NDJSON output. 
+#[allow(clippy::too_many_arguments)] pub async fn execute_method( client: Option<&AnalyzerClient>, method: &DiscoveryMethod, params_json: Option<&str>, body_json: Option<&str>, fields: Option<&str>, + pagination: &PaginationConfig, dry_run: bool, _format: Format, ) -> Result<()> { @@ -69,7 +93,9 @@ pub async fn execute_method( let field_list: Option> = fields.map(|f| f.split(',').map(str::trim).collect()); if dry_run { - let base = client.map(|c| c.base_url().as_str()).unwrap_or("/"); + let base = client + .map(|c| c.base_url().as_str()) + .unwrap_or("/"); println!("{} {base}{url_path}", method.http_method); if !query_params.is_empty() { for (k, v) in &query_params { @@ -79,6 +105,12 @@ pub async fn execute_method( if let Some(ref fl) = field_list { println!(" Fields: {}", fl.join(", ")); } + if pagination.page_all { + println!( + " Pagination: --page-all (limit: {}, delay: {}ms)", + pagination.page_limit, pagination.page_delay_ms + ); + } if let Some(b) = &body { println!("{}", serde_json::to_string_pretty(b)?); } @@ -86,16 +118,59 @@ pub async fn execute_method( } let client = client.context("API client required for non-dry-run execution")?; - let response = client - .execute_raw(&method.http_method, &url_path, &query_params, body.as_ref()) - .await?; - let output = match field_list { - Some(fl) => filter_fields(response, &fl), - None => response, - }; + // -- Pagination loop (or single request) ---------------------------------- + + let mut pages_fetched: u32 = 0; + let mut next_url: Option = None; + + loop { + let response = match &next_url { + Some(url) => client.execute_raw_url(url).await?, + None => { + client + .execute_raw(&method.http_method, &url_path, &query_params, body.as_ref()) + .await? 
+ } + }; + + pages_fetched += 1; + + let filtered = match &field_list { + Some(fl) => filter_fields(response.clone(), fl), + None => response.clone(), + }; + + if pagination.page_all { + // NDJSON: one compact JSON line per page + println!("{}", serde_json::to_string(&filtered)?); + } else { + println!("{}", serde_json::to_string_pretty(&filtered)?); + break; + } + + // Extract _links.next.href for continuation + let has_next = response + .get("_links") + .and_then(|l| l.get("next")) + .and_then(|n| n.get("href")) + .and_then(|h| h.as_str()) + .map(String::from); + + match has_next { + Some(url) if pages_fetched < pagination.page_limit => { + next_url = Some(url); + if pagination.page_delay_ms > 0 { + tokio::time::sleep(std::time::Duration::from_millis( + pagination.page_delay_ms, + )) + .await; + } + } + _ => break, + } + } - println!("{}", serde_json::to_string_pretty(&output)?); Ok(()) } @@ -150,4 +225,12 @@ mod tests { let result = filter_fields(input.clone(), &["id"]); assert_eq!(result, input); } + + #[test] + fn pagination_config_default() { + let cfg = PaginationConfig::default(); + assert!(!cfg.page_all); + assert_eq!(cfg.page_limit, 10); + assert_eq!(cfg.page_delay_ms, 100); + } } diff --git a/src/api/generate_skills.rs b/src/api/generate_skills.rs index 7a9923c..57ae3a1 100644 --- a/src/api/generate_skills.rs +++ b/src/api/generate_skills.rs @@ -323,6 +323,7 @@ export ANALYZER_API_KEY="your-api-key" | `--params ''` | Path and query parameters | | `--json ''` | Request body for POST/PUT/PATCH | | `--fields ''` | Limit response fields (context window protection) | +| `--page-all` | Auto-paginate results as NDJSON | | `--dry-run` | Validate and print request without executing | | `--discovery ` | Override discovery document (dev/testing) | | `--format ` | Output format: `human` (default), `json` | diff --git a/src/api/mod.rs b/src/api/mod.rs index 2e3cab3..8fe3948 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -51,6 +51,27 @@ pub fn 
build_api_command(doc: &DiscoveryDocument) -> clap::Command { .long("fields") .help("Comma-separated response fields to include (e.g. \"id,name,score\")") .global(true), + ) + .arg( + clap::Arg::new("page-all") + .long("page-all") + .action(clap::ArgAction::SetTrue) + .help("Auto-paginate and output NDJSON (one JSON object per line)") + .global(true), + ) + .arg( + clap::Arg::new("page-limit") + .long("page-limit") + .default_value("10") + .help("Maximum number of pages to fetch (default: 10)") + .global(true), + ) + .arg( + clap::Arg::new("page-delay") + .long("page-delay") + .default_value("100") + .help("Delay between page fetches in milliseconds (default: 100)") + .global(true), ); cmd = add_resource_subcommands(cmd, api_resource); @@ -105,6 +126,21 @@ pub async fn dispatch( let body_json = get_global_arg(leaf_matches, matches, "json"); let fields = get_global_arg(leaf_matches, matches, "fields"); let dry_run = leaf_matches.get_flag("dry-run") || matches.get_flag("dry-run"); + let page_all = leaf_matches.get_flag("page-all") || matches.get_flag("page-all"); + let page_limit: u32 = get_global_arg(leaf_matches, matches, "page-limit") + .unwrap_or_else(|| "10".into()) + .parse() + .unwrap_or(10); + let page_delay: u64 = get_global_arg(leaf_matches, matches, "page-delay") + .unwrap_or_else(|| "100".into()) + .parse() + .unwrap_or(100); + + let pagination = executor::PaginationConfig { + page_all, + page_limit, + page_delay_ms: page_delay, + }; if dry_run { return executor::execute_method( @@ -113,6 +149,7 @@ pub async fn dispatch( params_json.as_deref(), body_json.as_deref(), fields.as_deref(), + &pagination, true, format, ) @@ -127,6 +164,7 @@ pub async fn dispatch( params_json.as_deref(), body_json.as_deref(), fields.as_deref(), + &pagination, false, format, ) diff --git a/src/client/mod.rs b/src/client/mod.rs index 6af3465..18617f1 100644 --- a/src/client/mod.rs +++ b/src/client/mod.rs @@ -291,6 +291,25 @@ impl AnalyzerClient { } } + /// Execute a raw GET 
request to an absolute URL (for following pagination links). + /// + /// Used by `--page-all` to follow `_links.next.href` URLs which are absolute. + pub async fn execute_raw_url(&self, url: &str) -> Result { + let resp = self.client.get(url).send().await?; + let status = resp.status(); + if status.is_success() { + let text = resp.text().await?; + if text.is_empty() { + Ok(serde_json::json!({"status": "ok"})) + } else { + serde_json::from_str(&text).context("response is not valid JSON") + } + } else { + let body = resp.text().await.unwrap_or_default(); + bail!("API error (HTTP {status}): {body}"); + } + } + /// Expose the base URL for dry-run output. pub fn base_url(&self) -> &Url { &self.base_url From 3f4e9b16e59c7e7f5cd771195ebf5ec6094605b9 Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Tue, 17 Mar 2026 19:02:22 +0100 Subject: [PATCH 08/38] feat(dommyrock-analyzer-cli): update test structure and add pagination variation match and tests --- src/api/executor.rs | 408 ++++++++++++++++++++++++++++++------- src/api/generate_skills.rs | 28 +-- src/api/mod.rs | 5 +- src/discovery.rs | 21 +- src/lib.rs | 10 + src/main.rs | 11 +- tests/api_executor.rs | 353 ++++++++++++++++++++++++++++++++ 7 files changed, 732 insertions(+), 104 deletions(-) create mode 100644 src/lib.rs create mode 100644 tests/api_executor.rs diff --git a/src/api/executor.rs b/src/api/executor.rs index 7d5244a..7048da3 100644 --- a/src/api/executor.rs +++ b/src/api/executor.rs @@ -6,9 +6,10 @@ use anyhow::{Context, Result}; use serde_json::Value; +use std::collections::BTreeMap; use crate::client::AnalyzerClient; -use crate::discovery::DiscoveryMethod; +use crate::discovery::{DiscoveryMethod, DiscoveryParameter}; use crate::output::Format; /// Pagination configuration from `--page-all`, `--page-limit`, `--page-delay`. @@ -31,6 +32,15 @@ impl Default for PaginationConfig { } } +/// What the next pagination step looks like. 
+#[derive(Debug)] +enum NextPage { + /// Follow an absolute URL (from `_links.next.href`). + Url(String), + /// Increment the `page` query param (offset-based pagination). + IncrementPage { next_page: u32 }, +} + /// Execute a single discovery method against the API. /// /// `client` is `None` when `dry_run` is true (no auth required). @@ -53,35 +63,8 @@ pub async fn execute_method( None => serde_json::Map::new(), }; - // Substitute path parameters: `api/scans/{id}/score` → `api/scans/abc-123/score` - let mut url_path = method.path.clone(); - for (name, param_def) in &method.parameters { - if param_def.location == "path" { - let value = params - .get(name) - .map(|v| match v { - Value::String(s) => s.clone(), - other => other.to_string().trim_matches('"').to_string(), - }) - .or_else(|| param_def.default.clone()) - .with_context(|| format!("required path parameter '{name}' not provided"))?; - url_path = url_path.replace(&format!("{{{name}}}"), &value); - } - } - - // Collect query parameters - let mut query_params: Vec<(String, String)> = Vec::new(); - for (name, param_def) in &method.parameters { - if param_def.location == "query" { - if let Some(value) = params.get(name) { - let val_str = match value { - Value::String(s) => s.clone(), - other => other.to_string(), - }; - query_params.push((name.clone(), val_str)); - } - } - } + let url_path = substitute_path_params(&method.path, &method.parameters, ¶ms)?; + let mut query_params = collect_query_params(&method.parameters, ¶ms); // Parse request body let body: Option = match body_json { @@ -149,22 +132,28 @@ pub async fn execute_method( break; } - // Extract _links.next.href for continuation - let has_next = response - .get("_links") - .and_then(|l| l.get("next")) - .and_then(|n| n.get("href")) - .and_then(|h| h.as_str()) - .map(String::from); - - match has_next { - Some(url) if pages_fetched < pagination.page_limit => { + // Determine next page (try link-based, then offset-based) + match 
extract_next_page(&response, &query_params) { + Some(NextPage::Url(url)) if pages_fetched < pagination.page_limit => { next_url = Some(url); if pagination.page_delay_ms > 0 { - tokio::time::sleep(std::time::Duration::from_millis( - pagination.page_delay_ms, - )) - .await; + tokio::time::sleep(std::time::Duration::from_millis(pagination.page_delay_ms)) + .await; + } + } + Some(NextPage::IncrementPage { next_page }) + if pages_fetched < pagination.page_limit => + { + // Update or insert the page query param for the next request + if let Some(p) = query_params.iter_mut().find(|(k, _)| k == "page") { + p.1 = next_page.to_string(); + } else { + query_params.push(("page".to_string(), next_page.to_string())); + } + next_url = None; // re-use normal path with updated query params + if pagination.page_delay_ms > 0 { + tokio::time::sleep(std::time::Duration::from_millis(pagination.page_delay_ms)) + .await; } } _ => break, @@ -174,6 +163,103 @@ pub async fn execute_method( Ok(()) } +/// Substitute path parameters in the URL template. +/// +/// `api/scans/{id}/score` + `{"id": "abc-123"}` → `api/scans/abc-123/score` +fn substitute_path_params( + path: &str, + parameters: &BTreeMap, + params: &serde_json::Map, +) -> Result { + let mut url_path = path.to_string(); + for (name, param_def) in parameters { + if param_def.location == "path" { + let value = params + .get(name) + .map(|v| match v { + Value::String(s) => s.clone(), + other => other.to_string().trim_matches('"').to_string(), + }) + .or_else(|| param_def.default.clone()) + .with_context(|| format!("required path parameter '{name}' not provided"))?; + url_path = url_path.replace(&format!("{{{name}}}"), &value); + } + } + Ok(url_path) +} + +/// Collect query parameters from user-provided params that match discovery query params. 
+fn collect_query_params( + parameters: &BTreeMap, + params: &serde_json::Map, +) -> Vec<(String, String)> { + let mut query_params = Vec::new(); + for (name, param_def) in parameters { + if param_def.location == "query" { + if let Some(value) = params.get(name) { + let val_str = match value { + Value::String(s) => s.clone(), + other => other.to_string(), + }; + query_params.push((name.clone(), val_str)); + } + } + } + query_params +} + +/// Determine how to fetch the next page of results. +/// +/// Tries two strategies in order: +/// 1. **Link-based** (`_links.next.href`) — follow the absolute URL directly. +/// Used by endpoints that return `Page` wrappers. +/// 2. **Offset-based** (`total-findings` + `page`/`per-page`) — increment the +/// `page` query param. Used by `scans.results.get` and similar endpoints. +fn extract_next_page( + response: &Value, + current_query_params: &[(String, String)], +) -> Option { + // 1. Try _links.next.href (link-based pagination) + if let Some(url) = response + .get("_links") + .and_then(|l| l.get("next")) + .and_then(|n| n.get("href")) + .and_then(|h| h.as_str()) + { + if !url.is_empty() { + return Some(NextPage::Url(url.to_string())); + } + } + + // 2. Try offset-based: total-findings + page/per-page + let total = response + .get("total-findings") + .and_then(|v| v.as_u64()) + .unwrap_or(0); + if total == 0 { + return None; + } + + let current_page: u32 = current_query_params + .iter() + .find(|(k, _)| k == "page") + .and_then(|(_, v)| v.parse().ok()) + .unwrap_or(1); + let per_page: u32 = current_query_params + .iter() + .find(|(k, _)| k == "per-page") + .and_then(|(_, v)| v.parse().ok()) + .unwrap_or(25); + + if (current_page as u64) * (per_page as u64) < total { + Some(NextPage::IncrementPage { + next_page: current_page + 1, + }) + } else { + None + } +} + /// Client-side field filtering for context window protection. /// /// Keeps only the specified top-level keys from JSON objects. 
@@ -199,38 +285,218 @@ mod tests { use super::*; use serde_json::json; - #[test] - fn filter_fields_object() { - let input = json!({"id": "abc", "name": "test", "score": 72, "tags": []}); - let result = filter_fields(input, &["id", "name"]); - assert_eq!(result, json!({"id": "abc", "name": "test"})); + fn param(location: &str, required: bool, default: Option<&str>) -> DiscoveryParameter { + DiscoveryParameter { + param_type: "string".to_string(), + required, + location: location.to_string(), + description: None, + format: None, + enum_values: None, + default: default.map(String::from), + } + } + + mod filter_fields { + use super::*; + + #[test] + fn should_keep_only_specified_keys() { + let input = json!({"id": "abc", "name": "test", "score": 72, "tags": []}); + assert_eq!( + super::super::filter_fields(input, &["id", "name"]), + json!({"id": "abc", "name": "test"}) + ); + } + + #[test] + fn should_filter_each_array_element() { + let input = json!([ + {"id": "1", "name": "a", "extra": true}, + {"id": "2", "name": "b", "extra": false} + ]); + assert_eq!( + super::super::filter_fields(input, &["id", "name"]), + json!([{"id": "1", "name": "a"}, {"id": "2", "name": "b"}]) + ); + } + + #[test] + fn should_pass_through_primitives() { + let input = json!("hello"); + assert_eq!(super::super::filter_fields(input.clone(), &["id"]), input); + } + } + + mod substitute_path_params { + use super::*; + + #[test] + fn should_replace_single_param() { + let mut parameters = BTreeMap::new(); + parameters.insert("id".to_string(), param("path", true, None)); + let params = + serde_json::from_str::>(r#"{"id": "scan-123"}"#) + .unwrap(); + + let result = + super::super::substitute_path_params("api/scans/{id}", ¶meters, ¶ms) + .unwrap(); + assert_eq!(result, "api/scans/scan-123"); + } + + #[test] + fn should_replace_multiple_params() { + let mut parameters = BTreeMap::new(); + parameters.insert("scan_id".to_string(), param("path", true, None)); + 
parameters.insert("analysis_id".to_string(), param("path", true, None)); + let params = serde_json::from_str::>( + r#"{"scan_id": "s-1", "analysis_id": "cve"}"#, + ) + .unwrap(); + + let result = super::super::substitute_path_params( + "api/scans/{scan_id}/results/{analysis_id}", + ¶meters, + ¶ms, + ) + .unwrap(); + assert_eq!(result, "api/scans/s-1/results/cve"); + } + + #[test] + fn should_error_on_missing_required() { + let mut parameters = BTreeMap::new(); + parameters.insert("id".to_string(), param("path", true, None)); + + let result = super::super::substitute_path_params( + "api/scans/{id}", + ¶meters, + &serde_json::Map::new(), + ); + assert!(result.is_err()); + assert!(result.unwrap_err().to_string().contains("id")); + } + + #[test] + fn should_use_default_value() { + let mut parameters = BTreeMap::new(); + parameters.insert("id".to_string(), param("path", true, Some("default-id"))); + + let result = super::super::substitute_path_params( + "api/scans/{id}", + ¶meters, + &serde_json::Map::new(), + ) + .unwrap(); + assert_eq!(result, "api/scans/default-id"); + } } - #[test] - fn filter_fields_array() { - let input = json!([ - {"id": "1", "name": "a", "extra": true}, - {"id": "2", "name": "b", "extra": false} - ]); - let result = filter_fields(input, &["id", "name"]); - assert_eq!( - result, - json!([{"id": "1", "name": "a"}, {"id": "2", "name": "b"}]) - ); + mod collect_query_params { + use super::*; + + #[test] + fn should_include_query_params() { + let mut parameters = BTreeMap::new(); + parameters.insert("limit".to_string(), param("query", false, None)); + parameters.insert("page".to_string(), param("query", false, None)); + let params = serde_json::from_str::>( + r#"{"limit": "10", "page": "2"}"#, + ) + .unwrap(); + + let result = super::super::collect_query_params(¶meters, ¶ms); + assert_eq!(result.len(), 2); + assert!(result.contains(&("limit".to_string(), "10".to_string()))); + } + + #[test] + fn should_exclude_path_params() { + let mut parameters = 
BTreeMap::new(); + parameters.insert("id".to_string(), param("path", true, None)); + parameters.insert("limit".to_string(), param("query", false, None)); + let params = serde_json::from_str::>( + r#"{"id": "scan-1", "limit": "10"}"#, + ) + .unwrap(); + + let result = super::super::collect_query_params(¶meters, ¶ms); + assert_eq!(result.len(), 1); + assert_eq!(result[0].0, "limit"); + } } - #[test] - fn filter_fields_primitive_passthrough() { - let input = json!("hello"); - let result = filter_fields(input.clone(), &["id"]); - assert_eq!(result, input); + mod extract_next_page { + use super::*; + + #[test] + fn should_follow_links_href() { + let response = json!({ + "data": [{"id": "1"}], + "_links": {"next": {"href": "https://api.example.com/scans?page=2"}} + }); + match super::super::extract_next_page(&response, &[]) { + Some(NextPage::Url(url)) => { + assert_eq!(url, "https://api.example.com/scans?page=2"); + } + other => panic!("expected NextPage::Url, got {other:?}"), + } + } + + #[test] + fn should_increment_page_for_offset() { + let response = json!({"findings": [], "total-findings": 100}); + let qp = vec![ + ("page".to_string(), "1".to_string()), + ("per-page".to_string(), "25".to_string()), + ]; + match super::super::extract_next_page(&response, &qp) { + Some(NextPage::IncrementPage { next_page }) => assert_eq!(next_page, 2), + other => panic!("expected IncrementPage, got {other:?}"), + } + + // Also works with default page/per-page (no query params) + match super::super::extract_next_page(&response, &[]) { + Some(NextPage::IncrementPage { next_page }) => assert_eq!(next_page, 2), + other => panic!("expected IncrementPage with defaults, got {other:?}"), + } + } + + #[test] + fn should_stop_on_last_page() { + let response = json!({"findings": [], "total-findings": 25}); + let qp = vec![ + ("page".to_string(), "1".to_string()), + ("per-page".to_string(), "25".to_string()), + ]; + assert!(super::super::extract_next_page(&response, &qp).is_none()); + } + + 
#[test] + fn should_return_none_without_signals() { + // No _links, no total-findings + assert!(super::super::extract_next_page(&json!({"data": []}), &[]).is_none()); + // Empty _links + assert!( + super::super::extract_next_page(&json!({"_links": {}}), &[]).is_none() + ); + // Null next + assert!( + super::super::extract_next_page(&json!({"_links": {"next": null}}), &[]).is_none() + ); + } } - #[test] - fn pagination_config_default() { - let cfg = PaginationConfig::default(); - assert!(!cfg.page_all); - assert_eq!(cfg.page_limit, 10); - assert_eq!(cfg.page_delay_ms, 100); + mod pagination_config { + use super::*; + + #[test] + fn should_have_sensible_defaults() { + let cfg = PaginationConfig::default(); + assert!(!cfg.page_all); + assert_eq!(cfg.page_limit, 10); + assert_eq!(cfg.page_delay_ms, 100); + } } } diff --git a/src/api/generate_skills.rs b/src/api/generate_skills.rs index 57ae3a1..bef13a0 100644 --- a/src/api/generate_skills.rs +++ b/src/api/generate_skills.rs @@ -16,7 +16,7 @@ use std::path::Path; use anyhow::{Context, Result}; use crate::discovery::{DiscoveryDocument, DiscoveryResource}; -use crate::services::{ServiceEntry, SERVICES}; +use crate::services::{SERVICES, ServiceEntry}; struct SkillIndexEntry { name: String, @@ -38,8 +38,12 @@ pub fn generate_for_service( .get("api") .context("discovery document must have an 'api' resource")?; - std::fs::create_dir_all(output_dir) - .with_context(|| format!("failed to create output directory: {}", output_dir.display()))?; + std::fs::create_dir_all(output_dir).with_context(|| { + format!( + "failed to create output directory: {}", + output_dir.display() + ) + })?; for (resource_name, resource) in &api_resource.resources { let skill_name = format!("{alias}-{resource_name}"); @@ -59,8 +63,12 @@ pub fn generate_for_service( /// Generate the shared skill file (service-agnostic: auth, flags, patterns). 
pub fn generate_shared(output_dir: &Path) -> Result<()> { - std::fs::create_dir_all(output_dir) - .with_context(|| format!("failed to create output directory: {}", output_dir.display()))?; + std::fs::create_dir_all(output_dir).with_context(|| { + format!( + "failed to create output directory: {}", + output_dir.display() + ) + })?; let shared_dir = output_dir.join("shared"); std::fs::create_dir_all(&shared_dir)?; @@ -122,8 +130,7 @@ pub fn write_skills_index(output_dir: &Path) -> Result<()> { let docs_dir = Path::new("docs"); std::fs::create_dir_all(docs_dir).context("failed to create docs directory")?; let path = docs_dir.join("skills.md"); - std::fs::write(&path, &out) - .with_context(|| format!("failed to write {}", path.display()))?; + std::fs::write(&path, &out).with_context(|| format!("failed to write {}", path.display()))?; println!(" wrote {}", path.display()); Ok(()) } @@ -213,12 +220,7 @@ fn render_methods( methods: &std::collections::BTreeMap, ) { for (method_name, method) in methods { - let desc = truncate_desc( - method - .description - .as_deref() - .unwrap_or(""), - ); + let desc = truncate_desc(method.description.as_deref().unwrap_or("")); writeln!(out, " - `{method_name}` — {desc}").unwrap(); } } diff --git a/src/api/mod.rs b/src/api/mod.rs index 8fe3948..8d3bc02 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -9,8 +9,8 @@ pub mod schema; use anyhow::{Context, Result}; -use crate::config; use crate::client::AnalyzerClient; +use crate::config; use crate::discovery::{self, DiscoveryDocument, DiscoveryResource}; use crate::output::Format; @@ -91,8 +91,7 @@ fn add_resource_subcommands( } for (resource_name, child_resource) in &resource.resources { - let mut child_cmd = - clap::Command::new(resource_name.clone()).arg_required_else_help(true); + let mut child_cmd = clap::Command::new(resource_name.clone()).arg_required_else_help(true); child_cmd = add_resource_subcommands(child_cmd, child_resource); parent = parent.subcommand(child_cmd); } diff --git 
a/src/discovery.rs b/src/discovery.rs index 9b43f2a..8ecc310 100644 --- a/src/discovery.rs +++ b/src/discovery.rs @@ -117,11 +117,13 @@ pub async fn load(source: &DiscoverySource) -> Result { let json_str = match source { DiscoverySource::File(path) => std::fs::read_to_string(path) .with_context(|| format!("failed to read discovery file: {}", path.display()))?, - DiscoverySource::Url(url) => reqwest::get(url) - .await - .with_context(|| format!("failed to fetch discovery document from {url}"))? - .text() - .await?, + DiscoverySource::Url(url) => { + reqwest::get(url) + .await + .with_context(|| format!("failed to fetch discovery document from {url}"))? + .text() + .await? + } }; serde_json::from_str(&json_str).context("failed to parse discovery document") } @@ -159,16 +161,15 @@ pub async fn load_for_service(entry: &ServiceEntry) -> Result // Write cache (best-effort) if let Err(e) = std::fs::write(&cache_file, &json) { - eprintln!("warning: failed to write cache {}: {e}", cache_file.display()); + eprintln!( + "warning: failed to write cache {}: {e}", + cache_file.display() + ); } serde_json::from_str(&json).context("failed to parse discovery document") } -// --------------------------------------------------------------------------- -// Lookup helpers -// --------------------------------------------------------------------------- - /// Resolve a method by walking the resource tree with a path like `["scans", "score", "list"]`. pub fn resolve_method<'a>( resource: &'a DiscoveryResource, diff --git a/src/lib.rs b/src/lib.rs new file mode 100644 index 0000000..188cf4c --- /dev/null +++ b/src/lib.rs @@ -0,0 +1,10 @@ +//! Library re-exports for integration tests. +//! +//! The binary is in `main.rs`; this crate exposes modules needed by `tests/`. 
+ +pub mod api; +pub mod client; +pub mod config; +pub mod discovery; +pub mod output; +pub mod services; diff --git a/src/main.rs b/src/main.rs index 1e5b474..d39b2b3 100644 --- a/src/main.rs +++ b/src/main.rs @@ -561,9 +561,9 @@ async fn run(cli: Cli) -> Result<()> { }; let api_cmd = api::build_api_command(&doc); - let api_matches = match api_cmd - .try_get_matches_from(std::iter::once("api".to_string()).chain(rest_args.iter().cloned())) - { + let api_matches = match api_cmd.try_get_matches_from( + std::iter::once("api".to_string()).chain(rest_args.iter().cloned()), + ) { Ok(m) => m, Err(e) => { // Let clap handle --help and --version display directly @@ -615,10 +615,7 @@ async fn run(cli: Cli) -> Result<()> { services::SERVICES.len() ); for entry in services::SERVICES { - println!( - "\n Service: {} ({})", - entry.aliases[0], entry.api_name - ); + println!("\n Service: {} ({})", entry.aliases[0], entry.api_name); let doc = if let Some(flag) = discovery_flag.as_deref() { // --discovery provided: use it (single service mode for dev) let source = discovery::resolve_source(Some(flag))?; diff --git a/tests/api_executor.rs b/tests/api_executor.rs new file mode 100644 index 0000000..e5fb681 --- /dev/null +++ b/tests/api_executor.rs @@ -0,0 +1,353 @@ +//! Integration tests for the discovery-driven API executor. +//! +//! Uses wiremock to mock HTTP responses and validates the full execution path: +//! path param substitution, pagination (link-based + offset-based), --fields, +//! and --dry-run. 
+ +use serde_json::json; +use wiremock::matchers::{method, path, query_param, query_param_is_missing}; +use wiremock::{Mock, MockServer, ResponseTemplate}; + +use analyzer_cli::api::executor::{execute_method, PaginationConfig}; +use analyzer_cli::client::AnalyzerClient; +use analyzer_cli::discovery::{DiscoveryMethod, DiscoveryParameter}; +use analyzer_cli::output::Format; + +fn test_client(server: &MockServer) -> AnalyzerClient { + let url: url::Url = server.uri().parse().expect("valid mock server URL"); + AnalyzerClient::new(url, "test-key").expect("client creation") +} + +fn test_method(http_method: &str, api_path: &str) -> DiscoveryMethod { + DiscoveryMethod { + id: "test.method".to_string(), + http_method: http_method.to_string(), + path: api_path.to_string(), + description: Some("Test method".to_string()), + parameters: std::collections::BTreeMap::new(), + parameter_order: vec![], + request: None, + response: None, + scopes: vec![], + } +} + +fn test_method_with_path_param(api_path: &str) -> DiscoveryMethod { + let mut params = std::collections::BTreeMap::new(); + params.insert( + "id".to_string(), + DiscoveryParameter { + param_type: "string".to_string(), + required: true, + location: "path".to_string(), + description: None, + format: None, + enum_values: None, + default: None, + }, + ); + DiscoveryMethod { + id: "test.method".to_string(), + http_method: "GET".to_string(), + path: api_path.to_string(), + description: None, + parameters: params, + parameter_order: vec!["id".to_string()], + request: None, + response: None, + scopes: vec![], + } +} + +fn test_method_with_pagination(api_path: &str) -> DiscoveryMethod { + let mut params = std::collections::BTreeMap::new(); + params.insert( + "scan_id".to_string(), + DiscoveryParameter { + param_type: "string".to_string(), + required: true, + location: "path".to_string(), + description: None, + format: None, + enum_values: None, + default: None, + }, + ); + for name in ["page", "per-page"] { + params.insert( + 
name.to_string(), + DiscoveryParameter { + param_type: "integer".to_string(), + required: false, + location: "query".to_string(), + description: None, + format: None, + enum_values: None, + default: None, + }, + ); + } + DiscoveryMethod { + id: "test.results.get".to_string(), + http_method: "GET".to_string(), + path: api_path.to_string(), + description: None, + parameters: params, + parameter_order: vec!["scan_id".to_string()], + request: None, + response: None, + scopes: vec![], + } +} + +// --------------------------------------------------------------------------- +// Path params +// --------------------------------------------------------------------------- + +#[tokio::test] +async fn should_substitute_path_params_in_url() { + let server = MockServer::start().await; + Mock::given(method("GET")) + .and(path("/api/scans/scan-abc/score")) + .respond_with(ResponseTemplate::new(200).set_body_json(json!({"score": 72}))) + .expect(1) + .mount(&server) + .await; + + let client = test_client(&server); + let result = execute_method( + Some(&client), + &test_method_with_path_param("api/scans/{id}/score"), + Some(r#"{"id": "scan-abc"}"#), + None, + None, + &PaginationConfig::default(), + false, + Format::Json, + ) + .await; + assert!(result.is_ok()); +} + +#[tokio::test] +async fn should_error_on_missing_required_path_param() { + let server = MockServer::start().await; + let client = test_client(&server); + let result = execute_method( + Some(&client), + &test_method_with_path_param("api/scans/{id}"), + None, + None, + None, + &PaginationConfig::default(), + false, + Format::Json, + ) + .await; + assert!(result.is_err()); + assert!(result.unwrap_err().to_string().contains("id")); +} + +// --------------------------------------------------------------------------- +// Dry-run +// --------------------------------------------------------------------------- + +#[tokio::test] +async fn should_not_make_requests_in_dry_run() { + let server = MockServer::start().await; + let 
client = test_client(&server); + let result = execute_method( + Some(&client), + &test_method("GET", "api/scans"), + None, + None, + Some("id,name"), + &PaginationConfig { + page_all: true, + page_limit: 5, + page_delay_ms: 0, + }, + true, + Format::Json, + ) + .await; + assert!(result.is_ok()); + assert_eq!(server.received_requests().await.unwrap().len(), 0); +} + +// --------------------------------------------------------------------------- +// Fields +// --------------------------------------------------------------------------- + +#[tokio::test] +async fn should_apply_fields_filter_to_response() { + let server = MockServer::start().await; + Mock::given(method("GET")) + .and(path("/api/objects")) + .respond_with(ResponseTemplate::new(200).set_body_json(json!({ + "id": "obj-1", "name": "Router FW", "description": "Long text", "score": 85 + }))) + .expect(1) + .mount(&server) + .await; + + let client = test_client(&server); + let result = execute_method( + Some(&client), + &test_method("GET", "api/objects"), + None, + None, + Some("id,name"), + &PaginationConfig::default(), + false, + Format::Json, + ) + .await; + assert!(result.is_ok()); +} + +// --------------------------------------------------------------------------- +// Pagination — link-based (_links.next.href) +// --------------------------------------------------------------------------- + +#[tokio::test] +async fn should_follow_links_href_across_pages() { + let server = MockServer::start().await; + + Mock::given(method("GET")) + .and(path("/api/scans")) + .and(query_param_is_missing("page")) + .respond_with(ResponseTemplate::new(200).set_body_json(json!({ + "data": [{"id": "s-1"}], + "_links": {"next": {"href": format!("{}/api/scans?page=2", server.uri())}} + }))) + .expect(1) + .mount(&server) + .await; + + Mock::given(method("GET")) + .and(path("/api/scans")) + .and(query_param("page", "2")) + .respond_with(ResponseTemplate::new(200).set_body_json(json!({ + "data": [{"id": "s-2"}], "_links": {} + }))) 
+ .expect(1) + .mount(&server) + .await; + + let client = test_client(&server); + let result = execute_method( + Some(&client), + &test_method("GET", "api/scans"), + None, + None, + None, + &PaginationConfig { page_all: true, page_limit: 10, page_delay_ms: 0 }, + false, + Format::Json, + ) + .await; + assert!(result.is_ok()); +} + +#[tokio::test] +async fn should_respect_page_limit() { + let server = MockServer::start().await; + + Mock::given(method("GET")) + .and(path("/api/scans")) + .respond_with(ResponseTemplate::new(200).set_body_json(json!({ + "data": [{"id": "s-1"}], + "_links": {"next": {"href": format!("{}/api/scans?page=2", server.uri())}} + }))) + .expect(1) + .mount(&server) + .await; + + let client = test_client(&server); + let result = execute_method( + Some(&client), + &test_method("GET", "api/scans"), + None, + None, + None, + &PaginationConfig { page_all: true, page_limit: 1, page_delay_ms: 0 }, + false, + Format::Json, + ) + .await; + assert!(result.is_ok()); +} + +// --------------------------------------------------------------------------- +// Pagination — offset-based (page/per-page + total-findings) +// --------------------------------------------------------------------------- + +#[tokio::test] +async fn should_paginate_with_offset_params() { + let server = MockServer::start().await; + + Mock::given(method("GET")) + .and(path("/api/scans/s-1/results")) + .and(query_param("page", "1")) + .and(query_param("per-page", "25")) + .respond_with(ResponseTemplate::new(200).set_body_json(json!({ + "findings": [{"id": "cve-1"}], "total-findings": 50 + }))) + .expect(1) + .mount(&server) + .await; + + Mock::given(method("GET")) + .and(path("/api/scans/s-1/results")) + .and(query_param("page", "2")) + .and(query_param("per-page", "25")) + .respond_with(ResponseTemplate::new(200).set_body_json(json!({ + "findings": [{"id": "cve-2"}], "total-findings": 50 + }))) + .expect(1) + .mount(&server) + .await; + + let client = test_client(&server); + let result = 
execute_method( + Some(&client), + &test_method_with_pagination("api/scans/{scan_id}/results"), + Some(r#"{"scan_id": "s-1", "page": 1, "per-page": 25}"#), + None, + None, + &PaginationConfig { page_all: true, page_limit: 10, page_delay_ms: 0 }, + false, + Format::Json, + ) + .await; + assert!(result.is_ok()); +} + +#[tokio::test] +async fn should_stop_offset_pagination_on_last_page() { + let server = MockServer::start().await; + + Mock::given(method("GET")) + .and(path("/api/scans/s-1/results")) + .respond_with(ResponseTemplate::new(200).set_body_json(json!({ + "findings": [{"id": "cve-1"}], "total-findings": 10 + }))) + .expect(1) + .mount(&server) + .await; + + let client = test_client(&server); + let result = execute_method( + Some(&client), + &test_method_with_pagination("api/scans/{scan_id}/results"), + Some(r#"{"scan_id": "s-1", "page": 1, "per-page": 25}"#), + None, + None, + &PaginationConfig { page_all: true, page_limit: 10, page_delay_ms: 0 }, + false, + Format::Json, + ) + .await; + assert!(result.is_ok()); +} From 6663c87a198eeb21f9b3a57ed9bdec60e2704e05 Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Tue, 17 Mar 2026 19:02:58 +0100 Subject: [PATCH 09/38] feat(dommyrock-analyzer-cli): cargo fmt & clippy --- src/api/executor.rs | 4 +- tests/api_executor.rs | 458 ++++++++++++++++++++++-------------------- 2 files changed, 238 insertions(+), 224 deletions(-) diff --git a/src/api/executor.rs b/src/api/executor.rs index 7048da3..15a555f 100644 --- a/src/api/executor.rs +++ b/src/api/executor.rs @@ -478,9 +478,7 @@ mod tests { // No _links, no total-findings assert!(super::super::extract_next_page(&json!({"data": []}), &[]).is_none()); // Empty _links - assert!( - super::super::extract_next_page(&json!({"_links": {}}), &[]).is_none() - ); + assert!(super::super::extract_next_page(&json!({"_links": {}}), &[]).is_none()); // Null next assert!( super::super::extract_next_page(&json!({"_links": {"next": null}}), &[]).is_none() diff --git 
a/tests/api_executor.rs b/tests/api_executor.rs index e5fb681..67da535 100644 --- a/tests/api_executor.rs +++ b/tests/api_executor.rs @@ -8,7 +8,7 @@ use serde_json::json; use wiremock::matchers::{method, path, query_param, query_param_is_missing}; use wiremock::{Mock, MockServer, ResponseTemplate}; -use analyzer_cli::api::executor::{execute_method, PaginationConfig}; +use analyzer_cli::api::executor::{PaginationConfig, execute_method}; use analyzer_cli::client::AnalyzerClient; use analyzer_cli::discovery::{DiscoveryMethod, DiscoveryParameter}; use analyzer_cli::output::Format; @@ -100,254 +100,270 @@ fn test_method_with_pagination(api_path: &str) -> DiscoveryMethod { } } -// --------------------------------------------------------------------------- -// Path params -// --------------------------------------------------------------------------- +mod path_params { + use super::*; -#[tokio::test] -async fn should_substitute_path_params_in_url() { - let server = MockServer::start().await; - Mock::given(method("GET")) - .and(path("/api/scans/scan-abc/score")) - .respond_with(ResponseTemplate::new(200).set_body_json(json!({"score": 72}))) - .expect(1) - .mount(&server) - .await; + #[tokio::test] + async fn should_substitute_in_url() { + let server = MockServer::start().await; + Mock::given(method("GET")) + .and(path("/api/scans/scan-abc/score")) + .respond_with(ResponseTemplate::new(200).set_body_json(json!({"score": 72}))) + .expect(1) + .mount(&server) + .await; - let client = test_client(&server); - let result = execute_method( - Some(&client), - &test_method_with_path_param("api/scans/{id}/score"), - Some(r#"{"id": "scan-abc"}"#), - None, - None, - &PaginationConfig::default(), - false, - Format::Json, - ) - .await; - assert!(result.is_ok()); -} + let client = test_client(&server); + let result = execute_method( + Some(&client), + &test_method_with_path_param("api/scans/{id}/score"), + Some(r#"{"id": "scan-abc"}"#), + None, + None, + &PaginationConfig::default(), + 
false, + Format::Json, + ) + .await; + assert!(result.is_ok()); + } -#[tokio::test] -async fn should_error_on_missing_required_path_param() { - let server = MockServer::start().await; - let client = test_client(&server); - let result = execute_method( - Some(&client), - &test_method_with_path_param("api/scans/{id}"), - None, - None, - None, - &PaginationConfig::default(), - false, - Format::Json, - ) - .await; - assert!(result.is_err()); - assert!(result.unwrap_err().to_string().contains("id")); + #[tokio::test] + async fn should_error_on_missing_required() { + let server = MockServer::start().await; + let client = test_client(&server); + let result = execute_method( + Some(&client), + &test_method_with_path_param("api/scans/{id}"), + None, + None, + None, + &PaginationConfig::default(), + false, + Format::Json, + ) + .await; + assert!(result.is_err()); + assert!(result.unwrap_err().to_string().contains("id")); + } } -// --------------------------------------------------------------------------- -// Dry-run -// --------------------------------------------------------------------------- +mod dry_run { + use super::*; -#[tokio::test] -async fn should_not_make_requests_in_dry_run() { - let server = MockServer::start().await; - let client = test_client(&server); - let result = execute_method( - Some(&client), - &test_method("GET", "api/scans"), - None, - None, - Some("id,name"), - &PaginationConfig { - page_all: true, - page_limit: 5, - page_delay_ms: 0, - }, - true, - Format::Json, - ) - .await; - assert!(result.is_ok()); - assert_eq!(server.received_requests().await.unwrap().len(), 0); + #[tokio::test] + async fn should_not_make_requests() { + let server = MockServer::start().await; + let client = test_client(&server); + let result = execute_method( + Some(&client), + &test_method("GET", "api/scans"), + None, + None, + Some("id,name"), + &PaginationConfig { + page_all: true, + page_limit: 5, + page_delay_ms: 0, + }, + true, + Format::Json, + ) + .await; + 
assert!(result.is_ok()); + assert_eq!(server.received_requests().await.unwrap().len(), 0); + } } -// --------------------------------------------------------------------------- -// Fields -// --------------------------------------------------------------------------- +mod fields { + use super::*; -#[tokio::test] -async fn should_apply_fields_filter_to_response() { - let server = MockServer::start().await; - Mock::given(method("GET")) - .and(path("/api/objects")) - .respond_with(ResponseTemplate::new(200).set_body_json(json!({ - "id": "obj-1", "name": "Router FW", "description": "Long text", "score": 85 - }))) - .expect(1) - .mount(&server) - .await; + #[tokio::test] + async fn should_apply_filter_to_response() { + let server = MockServer::start().await; + Mock::given(method("GET")) + .and(path("/api/objects")) + .respond_with(ResponseTemplate::new(200).set_body_json(json!({ + "id": "obj-1", "name": "Router FW", "description": "Long text", "score": 85 + }))) + .expect(1) + .mount(&server) + .await; - let client = test_client(&server); - let result = execute_method( - Some(&client), - &test_method("GET", "api/objects"), - None, - None, - Some("id,name"), - &PaginationConfig::default(), - false, - Format::Json, - ) - .await; - assert!(result.is_ok()); + let client = test_client(&server); + let result = execute_method( + Some(&client), + &test_method("GET", "api/objects"), + None, + None, + Some("id,name"), + &PaginationConfig::default(), + false, + Format::Json, + ) + .await; + assert!(result.is_ok()); + } } -// --------------------------------------------------------------------------- -// Pagination — link-based (_links.next.href) -// --------------------------------------------------------------------------- +mod pagination_link_based { + use super::*; -#[tokio::test] -async fn should_follow_links_href_across_pages() { - let server = MockServer::start().await; + #[tokio::test] + async fn should_follow_links_href_across_pages() { + let server = 
MockServer::start().await; - Mock::given(method("GET")) - .and(path("/api/scans")) - .and(query_param_is_missing("page")) - .respond_with(ResponseTemplate::new(200).set_body_json(json!({ - "data": [{"id": "s-1"}], - "_links": {"next": {"href": format!("{}/api/scans?page=2", server.uri())}} - }))) - .expect(1) - .mount(&server) - .await; + Mock::given(method("GET")) + .and(path("/api/scans")) + .and(query_param_is_missing("page")) + .respond_with(ResponseTemplate::new(200).set_body_json(json!({ + "data": [{"id": "s-1"}], + "_links": {"next": {"href": format!("{}/api/scans?page=2", server.uri())}} + }))) + .expect(1) + .mount(&server) + .await; + + Mock::given(method("GET")) + .and(path("/api/scans")) + .and(query_param("page", "2")) + .respond_with(ResponseTemplate::new(200).set_body_json(json!({ + "data": [{"id": "s-2"}], "_links": {} + }))) + .expect(1) + .mount(&server) + .await; - Mock::given(method("GET")) - .and(path("/api/scans")) - .and(query_param("page", "2")) - .respond_with(ResponseTemplate::new(200).set_body_json(json!({ - "data": [{"id": "s-2"}], "_links": {} - }))) - .expect(1) - .mount(&server) + let client = test_client(&server); + let result = execute_method( + Some(&client), + &test_method("GET", "api/scans"), + None, + None, + None, + &PaginationConfig { + page_all: true, + page_limit: 10, + page_delay_ms: 0, + }, + false, + Format::Json, + ) .await; + assert!(result.is_ok()); + } - let client = test_client(&server); - let result = execute_method( - Some(&client), - &test_method("GET", "api/scans"), - None, - None, - None, - &PaginationConfig { page_all: true, page_limit: 10, page_delay_ms: 0 }, - false, - Format::Json, - ) - .await; - assert!(result.is_ok()); -} + #[tokio::test] + async fn should_respect_page_limit() { + let server = MockServer::start().await; -#[tokio::test] -async fn should_respect_page_limit() { - let server = MockServer::start().await; + Mock::given(method("GET")) + .and(path("/api/scans")) + 
.respond_with(ResponseTemplate::new(200).set_body_json(json!({ + "data": [{"id": "s-1"}], + "_links": {"next": {"href": format!("{}/api/scans?page=2", server.uri())}} + }))) + .expect(1) + .mount(&server) + .await; - Mock::given(method("GET")) - .and(path("/api/scans")) - .respond_with(ResponseTemplate::new(200).set_body_json(json!({ - "data": [{"id": "s-1"}], - "_links": {"next": {"href": format!("{}/api/scans?page=2", server.uri())}} - }))) - .expect(1) - .mount(&server) + let client = test_client(&server); + let result = execute_method( + Some(&client), + &test_method("GET", "api/scans"), + None, + None, + None, + &PaginationConfig { + page_all: true, + page_limit: 1, + page_delay_ms: 0, + }, + false, + Format::Json, + ) .await; - - let client = test_client(&server); - let result = execute_method( - Some(&client), - &test_method("GET", "api/scans"), - None, - None, - None, - &PaginationConfig { page_all: true, page_limit: 1, page_delay_ms: 0 }, - false, - Format::Json, - ) - .await; - assert!(result.is_ok()); + assert!(result.is_ok()); + } } -// --------------------------------------------------------------------------- -// Pagination — offset-based (page/per-page + total-findings) -// --------------------------------------------------------------------------- +mod pagination_offset_based { + use super::*; -#[tokio::test] -async fn should_paginate_with_offset_params() { - let server = MockServer::start().await; + #[tokio::test] + async fn should_increment_page_param() { + let server = MockServer::start().await; - Mock::given(method("GET")) - .and(path("/api/scans/s-1/results")) - .and(query_param("page", "1")) - .and(query_param("per-page", "25")) - .respond_with(ResponseTemplate::new(200).set_body_json(json!({ - "findings": [{"id": "cve-1"}], "total-findings": 50 - }))) - .expect(1) - .mount(&server) - .await; + Mock::given(method("GET")) + .and(path("/api/scans/s-1/results")) + .and(query_param("page", "1")) + .and(query_param("per-page", "25")) + 
.respond_with(ResponseTemplate::new(200).set_body_json(json!({ + "findings": [{"id": "cve-1"}], "total-findings": 50 + }))) + .expect(1) + .mount(&server) + .await; - Mock::given(method("GET")) - .and(path("/api/scans/s-1/results")) - .and(query_param("page", "2")) - .and(query_param("per-page", "25")) - .respond_with(ResponseTemplate::new(200).set_body_json(json!({ - "findings": [{"id": "cve-2"}], "total-findings": 50 - }))) - .expect(1) - .mount(&server) + Mock::given(method("GET")) + .and(path("/api/scans/s-1/results")) + .and(query_param("page", "2")) + .and(query_param("per-page", "25")) + .respond_with(ResponseTemplate::new(200).set_body_json(json!({ + "findings": [{"id": "cve-2"}], "total-findings": 50 + }))) + .expect(1) + .mount(&server) + .await; + + let client = test_client(&server); + let result = execute_method( + Some(&client), + &test_method_with_pagination("api/scans/{scan_id}/results"), + Some(r#"{"scan_id": "s-1", "page": 1, "per-page": 25}"#), + None, + None, + &PaginationConfig { + page_all: true, + page_limit: 10, + page_delay_ms: 0, + }, + false, + Format::Json, + ) .await; + assert!(result.is_ok()); + } - let client = test_client(&server); - let result = execute_method( - Some(&client), - &test_method_with_pagination("api/scans/{scan_id}/results"), - Some(r#"{"scan_id": "s-1", "page": 1, "per-page": 25}"#), - None, - None, - &PaginationConfig { page_all: true, page_limit: 10, page_delay_ms: 0 }, - false, - Format::Json, - ) - .await; - assert!(result.is_ok()); -} + #[tokio::test] + async fn should_stop_on_last_page() { + let server = MockServer::start().await; -#[tokio::test] -async fn should_stop_offset_pagination_on_last_page() { - let server = MockServer::start().await; + Mock::given(method("GET")) + .and(path("/api/scans/s-1/results")) + .respond_with(ResponseTemplate::new(200).set_body_json(json!({ + "findings": [{"id": "cve-1"}], "total-findings": 10 + }))) + .expect(1) + .mount(&server) + .await; - Mock::given(method("GET")) - 
.and(path("/api/scans/s-1/results")) - .respond_with(ResponseTemplate::new(200).set_body_json(json!({ - "findings": [{"id": "cve-1"}], "total-findings": 10 - }))) - .expect(1) - .mount(&server) + let client = test_client(&server); + let result = execute_method( + Some(&client), + &test_method_with_pagination("api/scans/{scan_id}/results"), + Some(r#"{"scan_id": "s-1", "page": 1, "per-page": 25}"#), + None, + None, + &PaginationConfig { + page_all: true, + page_limit: 10, + page_delay_ms: 0, + }, + false, + Format::Json, + ) .await; - - let client = test_client(&server); - let result = execute_method( - Some(&client), - &test_method_with_pagination("api/scans/{scan_id}/results"), - Some(r#"{"scan_id": "s-1", "page": 1, "per-page": 25}"#), - None, - None, - &PaginationConfig { page_all: true, page_limit: 10, page_delay_ms: 0 }, - false, - Format::Json, - ) - .await; - assert!(result.is_ok()); + assert!(result.is_ok()); + } } From e60edd476095789daa1b10132f69eb174adb9302 Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Wed, 18 Mar 2026 12:08:50 +0100 Subject: [PATCH 10/38] feat(dommyrock-analyzer-cli): fix render_sub_resources --- .gitignore | 3 +- Schema-introspection-flow.md | 55 ++ analyzer-discovery.json | 1607 +++++++++++++++++++++++++++++++- skills/analyzer-scans/SKILL.md | 5 +- src/api/generate_skills.rs | 20 +- 5 files changed, 1678 insertions(+), 12 deletions(-) create mode 100644 Schema-introspection-flow.md diff --git a/.gitignore b/.gitignore index c225fd1..e1b9cbe 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ /target Cargo.lock -usage_examples.txt \ No newline at end of file +usage_examples.txt +*-discovery.json \ No newline at end of file diff --git a/Schema-introspection-flow.md b/Schema-introspection-flow.md new file mode 100644 index 0000000..442e1be --- /dev/null +++ b/Schema-introspection-flow.md @@ -0,0 +1,55 @@ +## When an agent runs analyzer schema analyzer.scans.compliance-check: + +1. 
Service "analyzer" resolves from the registry +2. Path becomes api.scans.compliance-check (the "api." prefix is prepended by main.rs) +3. Segments: ["api", "scans", "compliance-check"] +4. resolve_method tries first — fails (compliance-check is a resource, not a method) +5. resolve_resource tries next — succeeds, finds the container resource +6. print_resource_tree renders it + +The agent would see this output: + +``` +cyber-resilience-act/ + list (GET) — Computes compliance with Cyber Resilience Act + ai-suggestion/ + begin/ + create (POST) — Triggers CRA AI suggestion using user-provided documen + status/ + list (GET) — Returns status of the CRA AI suggestion. + overwrite/ + overwrite_compliance_check_requirement (PUT) — Overwrites compliance check requirement + report/ + list (GET) — Downloads Cyber Resilience Act compliance report as PDF +``` + +The agent now knows it can drill deeper. For example, to get the full method signature: + +analyzer schema analyzer.scans.compliance-check.cyber-resilience-act.list + +Which would output the JSON method schema: + +```json +{ + "id": "analyzer-api-routes.api.scans.compliance-check.cyber-resilience-act.list", + "httpMethod": "GET", + "path": "api/scans/{id}/compliance-check/cyber-resilience-act", + "description": "Computes compliance with Cyber Resilience Act", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID", + "format": "uuid" + } + }, + "parameterOrder": ["id"], + "response": { + "$ref": "CyberResilienceActReport" + } +} +``` + +That's the full schema first loop: `browse the tree` → `pick a method` → `get the exact signature` → `build the command`. An agent goes from knowing nothing +to calling the API in 2-3 schema lookups. 
\ No newline at end of file diff --git a/analyzer-discovery.json b/analyzer-discovery.json index bf51dfb..aedb4a7 100644 --- a/analyzer-discovery.json +++ b/analyzer-discovery.json @@ -1 +1,1606 @@ -{"kind":"discovery#restDescription","discoveryVersion":"v1","id":"analyzer-api-routes:0.5.0","name":"analyzer-api-routes","version":"0.5.0","title":"Analyzer API routes","description":"","protocol":"rest","rootUrl":"","servicePath":"","schemas":{"AiResult":{"id":"AiResult","properties":{"reasoning":{"description":"AI reasoning, which lead to current status","type":"string"},"sources":{"description":"List of documents used by AI to produce current status.","items":{"$ref":"UserUploadedDocument"},"type":"array"},"status":{"$ref":"AiStatus"},"user-action":{"$ref":"SuggestionResponse"}},"type":"object"},"AiStatus":{"description":"Represents the status of a requirement determined by ai","enum":["passed","failed","unknown"],"id":"AiStatus","type":"string"},"AiSuggestionStatus":{"description":"Status of the AI suggestions computation.","id":"AiSuggestionStatus","properties":{"status":{"$ref":"Status"}},"type":"object"},"AnalysisFilter":{"id":"AnalysisFilter","properties":{"query-name":{"$ref":"QueryName"},"values":{"description":"Avaliable filter values with their count.","items":{"$ref":"FilterValue"},"type":"array"}},"type":"object"},"AnalysisFindings":{"description":"Wrapper type similar to AnalysisResult, but it contains only `findings`\nportion of analysis.","id":"AnalysisFindings"},"AnalysisId":{"description":"A wrapper struct `AnalysisId` around a UUID.\n ID in the analysis table.","format":"uuid","id":"AnalysisId","type":"string"},"AnalysisInfo":{"description":"Helper struct to define if a analysis should be by default enabled","id":"AnalysisInfo","properties":{"default":{"type":"boolean"},"type":{"type":"string"}},"type":"object"},"AnalysisOverview":{"description":"Like [`ScanOverview`] but for single 
analysis.","id":"AnalysisOverview"},"AnalysisQueryUnion":{"description":"Union of all available query parameters for analyses.","id":"AnalysisQueryUnion"},"AnalysisResultDTO":{"description":"AnalysisResult but with count of all findings,\nbefore pagination was applied.","id":"AnalysisResultDTO","properties":{"filters":{"description":"Filters that can be used in this analysis.","type":"object"},"findings":{"$ref":"AnalysisFindings"},"total-findings":{"description":"Total count of findings _after_ filtering, but _before_ pagination.","format":"int64","type":"integer"}},"type":"object"},"AnalysisScore":{"description":"The score of an analysis,","id":"AnalysisScore","properties":{"id":{"$ref":"AnalysisId"},"score":{"$ref":"Score"},"type":{"$ref":"AnalysisType"}},"type":"object"},"AnalysisState":{"description":"A analysis that runs for one particular system image.","id":"AnalysisState","properties":{"id":{"$ref":"AnalysisId"},"status":{"$ref":"AnalysisStatus"},"type":{"$ref":"ScanType"}},"type":"object"},"AnalysisStatus":{"description":"Represents the current execution status of an analysis task.","enum":["success","pending","in-progress","canceled","error"],"id":"AnalysisStatus","type":"string"},"AnalysisType":{"description":"Type of the analysis","enum":["info","kernel","cve","password-hash","hardening","malware","software-bom","crypto","capabilities","symbols","tasks","stack-overflow"],"id":"AnalysisType","type":"string"},"AnalyzerResult":{"id":"AnalyzerResult","properties":{"status":{"$ref":"AnalyzerStatus"}},"type":"object"},"AnalyzerStatus":{"description":"Represents the status of a requirement determined by analyzer","enum":["passed","failed","unknown","not-applicable"],"id":"AnalyzerStatus","type":"string"},"ApiScanType":{"description":"List of available analysis types per image type.\n\nThis includes the information if a analysis type should be scheduled by default or not.\n\n# Note\n\nThis is used by the frontend to determine which analysis has to be scheduled 
implicitly\nand which types are optional.","id":"ApiScanType"},"BindFilter":{"enum":["local","global","weak"],"id":"BindFilter","type":"string"},"CapabilitiesOverview":{"description":"Overview for Capability analysis.","id":"CapabilitiesOverview","properties":{"capabilities":{"description":"Capability found and their number of occurrences.","type":"object"},"counts":{"$ref":"RiskLevelCount"},"executable_count":{"description":"Total number executables.","format":"int64","type":"integer"}},"type":"object"},"CapabilityParams":{"id":"CapabilityParams","properties":{"search":{"type":"string"},"severity-filter":{"items":{"$ref":"SeverityFilter"},"type":"array"},"sort-by":{"$ref":"CapabilitySortBy"},"sort-ord":{"$ref":"SortOrd"}},"type":"object"},"CapabilitySortBy":{"enum":["severity"],"id":"CapabilitySortBy","type":"string"},"Checks":{"description":"Represents the checks performed in the report","id":"Checks","properties":{"failed":{"description":"Number of checks that failed (determined either by analyzer or overwritten by the user)","format":"int32","type":"integer"},"not-applicable":{"description":"Number of not applicable requirements","format":"int32","type":"integer"},"passed":{"description":"Number of checks that passed (determined either by analyzer or overwritten by the user)","format":"int32","type":"integer"},"suggestion-available":{"description":"Number of checks for which AI suggestion is available.\n\nIt does not include user accepted or rejected suggestions.","format":"int32","type":"integer"},"total":{"description":"Total number of checks performed","format":"int32","type":"integer"},"unknown":{"description":"Number of checks that analyzer was unable to determine\n(or ai didn't give conclusive suggestion).\n\nNote that this will also include those requirements,\nthat have ai suggestion available, but user has not approved or rejected it 
yet.","format":"int32","type":"integer"}},"type":"object"},"ComponentType":{"enum":["application","framework","library","container","operating-system","device","firmware","file"],"id":"ComponentType","type":"string"},"CreateObject":{"description":"The request to create a new object.","id":"CreateObject","properties":{"description":{"description":"Description of the object.","type":"string"},"name":{"description":"Name of the object.","type":"string"},"tags":{"description":"Tags associated with the object.","items":{"type":"string"},"type":"array"}},"type":"object"},"CryptoOverview":{"description":"Overview for Crypto analysis.","id":"CryptoOverview","properties":{"certificates":{"description":"Number of certificates found.","format":"int64","type":"integer"},"private_keys":{"description":"Number of private keys found.","format":"int64","type":"integer"},"public_keys":{"description":"Number of public keys found.","format":"int64","type":"integer"}},"type":"object"},"CryptoParams":{"id":"CryptoParams","properties":{"search":{"type":"string"},"sort-by":{"$ref":"CryptoSortBy"},"sort-ord":{"$ref":"SortOrd"},"type-filter":{"items":{"$ref":"CryptoTypeFilter"},"type":"array"}},"type":"object"},"CryptoSortBy":{"enum":["type","key-size","filename","path","issuer"],"id":"CryptoSortBy","type":"string"},"CryptoTypeFilter":{"enum":["certificate","private-key","public-key"],"id":"CryptoTypeFilter","type":"string"},"CveOverview":{"description":"Overview for Cve analysis.","id":"CveOverview","properties":{"counts":{"$ref":"CveSeverityCount"},"products":{"description":"Cve counts for each \"product\" (binary, library, etc.).","type":"object"},"total":{"description":"Sum of all 
`counts`.","format":"int64","type":"integer"}},"type":"object"},"CveParams":{"id":"CveParams","properties":{"patch-filter":{"items":{"$ref":"CvePatchFilter"},"type":"array"},"search":{"type":"string"},"severity-filter":{"items":{"$ref":"CveSeverityFilter"},"type":"array"},"sort-by":{"$ref":"CveSortBy"},"sort-ord":{"$ref":"SortOrd"}},"type":"object"},"CvePatchFilter":{"enum":["available","unavailable"],"id":"CvePatchFilter","type":"string"},"CveSeverityCount":{"description":"Maps CVE severity to its count","id":"CveSeverityCount","properties":{"critical":{"format":"int64","type":"integer"},"high":{"format":"int64","type":"integer"},"low":{"format":"int64","type":"integer"},"medium":{"format":"int64","type":"integer"},"unknown":{"format":"int64","type":"integer"}},"type":"object"},"CveSeverityFilter":{"enum":["low","medium","high","critical"],"id":"CveSeverityFilter","type":"string"},"CveSortBy":{"enum":["severity"],"id":"CveSortBy","type":"string"},"CyberResilienceActReport":{"description":"Represents a Cyber Resilience Act report","id":"CyberResilienceActReport","properties":{"checks":{"$ref":"Checks"},"created-at":{"description":"Date and time when the report was created.","format":"date-time","type":"string"},"name":{"description":"Name of the report.","type":"string"},"sections":{"description":"List of categories in the report.","items":{"$ref":"Section"},"type":"array"},"updated-at":{"description":"Date and time of last report update.\n\nIf no update has happened yet, for example after report was generated\nand before any user overwrite, this will be `null`.","format":"date-time","type":"string"}},"type":"object"},"DockerAnalysis":{"description":"Represents different types of analyses for Docker containers.","enum":["info","cve","password-hash","crypto","software-bom","malware","hardening","capabilities"],"id":"DockerAnalysis","type":"string"},"DockerInfo":{"description":"Container metadata information\n\nRepresents various metadata attributes of a container 
image","id":"DockerInfo","properties":{"arch":{"description":"List of supported CPU architectures for the container","items":{"type":"string"},"type":"array"},"ctime":{"description":"List of creation timestamps for container layers","items":{"type":"string"},"type":"array"},"env":{"description":"List of environment variables defined in the container","items":{"type":"string"},"type":"array"},"history":{"description":"List of commands used to build the container layers","items":{"$ref":"History"},"type":"array"},"os":{"description":"List of supported operating systems for the container","items":{"type":"string"},"type":"array"},"os_name":{"description":"Name of the base operating system used in the container","type":"string"},"os_version":{"description":"Version of the base operating system used in the container","type":"string"},"tags":{"description":"List of container image tags associated with the image","items":{"type":"string"},"type":"array"}},"type":"object"},"DockerInfoResult":{"description":"Info result for docker image","id":"DockerInfoResult"},"DocumentListItem":{"description":"A single document entry in a listing.","id":"DocumentListItem","properties":{"file-name":{"description":"Original file name, serves as the unique key within a scan's document storage","type":"string"}},"type":"object"},"DocumentListResponse":{"description":"A list of documents associated with a scan.","id":"DocumentListResponse","properties":{"documents":{"items":{"$ref":"DocumentListItem"},"type":"array"}},"type":"object"},"DocumentUploadResponse":{"description":"The response after successfully uploading a document.","id":"DocumentUploadResponse","properties":{"file-name":{"description":"Original file name, serves as the unique key within a scan's document 
storage","type":"string"}},"type":"object"},"FeaturesFilter":{"enum":["seccomp","seccomp-filter","security-network","stack-protector","fortify-source","vmap-kernel-stack","usercopy","heap-freelist-obfuscation","executable-memory-protection","kaslr","apparmor","selinux","smack","tomoyo","yama"],"id":"FeaturesFilter","type":"string"},"FilterValue":{"id":"FilterValue","properties":{"count":{"description":"Count of findings matching this value for current filter options.","format":"int64","type":"integer"},"value":{"description":"Filter value that can be passed in query parameters.","type":"string"}},"type":"object"},"HardeningOverview":{"description":"Overview for Hardening analysis.","id":"HardeningOverview","properties":{"counts":{"$ref":"HardeningSeverityCount"},"total":{"description":"Sum of all `counts`.","format":"int64","type":"integer"}},"type":"object"},"HardeningParams":{"id":"HardeningParams","properties":{"search":{"type":"string"},"severity-filter":{"items":{"$ref":"HardeningSeverityFilter"},"type":"array"},"sort-by":{"$ref":"HardeningSortBy"},"sort-ord":{"$ref":"SortOrd"}},"type":"object"},"HardeningSeverityCount":{"description":"Maps Hardening severity to its count","id":"HardeningSeverityCount","properties":{"high":{"format":"int64","type":"integer"},"low":{"format":"int64","type":"integer"},"medium":{"format":"int64","type":"integer"}},"type":"object"},"HardeningSeverityFilter":{"enum":["low","medium","high"],"id":"HardeningSeverityFilter","type":"string"},"HardeningSortBy":{"enum":["severity","filename","canary","nx","pie","relro","fortify"],"id":"HardeningSortBy","type":"string"},"HealthStatus":{"description":"Health status of an application.\n\nIt contains an overall `healthy` field but can also provide\nthe status of individual components or an error message.\nIf the status is not healthy a Http status code of 500 will be 
returned.","id":"HealthStatus","properties":{"healthy":{"type":"boolean"},"message":{"type":"string"}},"type":"object"},"History":{"id":"History","properties":{"created":{"format":"date-time","type":"string"},"created_by":{"type":"string"},"empty_layer":{"type":"boolean"}},"type":"object"},"IdfAnalysis":{"description":"Represents analyses specific to IDF (IoT Device Framework) targets.","enum":["info","cve","software-bom","symbols","tasks","stack-overflow"],"id":"IdfAnalysis","type":"string"},"IdfInfo":{"description":"IdfInfo analysis entry for idf image","id":"IdfInfo","properties":{"arch":{"description":"Architecture type","type":"string"},"compiler":{"description":"Compiler name and version used to create this image","type":"string"},"freertos":{"description":"freertos version","type":"string"},"idf":{"description":"idf version","type":"string"}},"type":"object"},"IdfInfoResult":{"description":"Info result for idf image","id":"IdfInfoResult"},"IdfSymbolParams":{"id":"IdfSymbolParams","properties":{"bind-filter":{"items":{"$ref":"BindFilter"},"type":"array"},"search":{"type":"string"},"sort-by":{"$ref":"IdfSymbolSortBy"},"sort-ord":{"$ref":"SortOrd"},"type-filter":{"items":{"$ref":"TypeFilter"},"type":"array"}},"type":"object"},"IdfSymbolSortBy":{"enum":["name"],"id":"IdfSymbolSortBy","type":"string"},"IdfTaskParams":{"id":"IdfTaskParams","properties":{"search":{"type":"string"},"sort-by":{"$ref":"IdfTaskSortBy"},"sort-ord":{"$ref":"SortOrd"}},"type":"object"},"IdfTaskSortBy":{"enum":["function","name"],"id":"IdfTaskSortBy","type":"string"},"Image":{"description":"A image on which a scan is executed","id":"Image","properties":{"file_name":{"description":"The original name of the file as provided when the image was uploaded.\nThis is typically used for display or reference purposes and may not be unique.","type":"string"},"id":{"$ref":"ImageId"}},"type":"object"},"ImageId":{"description":"A wrapper struct `ImageId` around a UUID.\n ID in the images 
table.","format":"uuid","id":"ImageId","type":"string"},"ImageType":{"description":"Type of the image used in scan","enum":["linux","docker","idf"],"id":"ImageType","type":"string"},"Info":{"id":"Info"},"InfoOverview":{"id":"InfoOverview"},"KernelOverview":{"description":"Overview for Kernel analysis.","id":"KernelOverview","properties":{"count":{"description":"Number of kernel security features enabled.","format":"int64","type":"integer"}},"type":"object"},"KernelParams":{"id":"KernelParams","properties":{"features-filter":{"items":{"$ref":"FeaturesFilter"},"type":"array"},"sort-by":{"$ref":"KernelSortBy"},"sort-ord":{"$ref":"SortOrd"},"status-filter":{"items":{"$ref":"StatusFilter"},"type":"array"}},"type":"object"},"KernelSortBy":{"enum":["features","status"],"id":"KernelSortBy","type":"string"},"LinuxAnalysis":{"description":"Represents different types of analyses that can be performed on a Linux system.","enum":["info","kernel","cve","password-hash","crypto","software-bom","malware","hardening","capabilities"],"id":"LinuxAnalysis","type":"string"},"LinuxInfo":{"description":"Represents the information about the system","id":"LinuxInfo","properties":{"arch":{"description":"The tags associated with the system","type":"string"},"banner":{"description":"The operating system name","type":"string"},"kernel_version":{"description":"The kernel version","type":"string"},"libc":{"description":"The operating system version","type":"string"}},"type":"object"},"LinuxInfoResult":{"description":"Info result for linux image","id":"LinuxInfoResult"},"MalwareOverview":{"description":"Overview for Malware analysis.","id":"MalwareOverview","properties":{"count":{"description":"Number of malware 
detected.","format":"int64","type":"integer"}},"type":"object"},"MalwareParams":{"id":"MalwareParams","properties":{"sort-by":{"$ref":"MalwareSortBy"},"sort-ord":{"$ref":"SortOrd"}},"type":"object"},"MalwareSortBy":{"enum":["filename"],"id":"MalwareSortBy","type":"string"},"NewScanResponse":{"description":"The response if a new scan is created.","id":"NewScanResponse","properties":{"id":{"$ref":"ScanId"}},"type":"object"},"ObjectId":{"description":"A wrapper struct `ObjectId` around a UUID.\n ID in the objects table.","format":"uuid","id":"ObjectId","type":"string"},"PasswordHashOverview":{"description":"Overview for Password Hash analysis.","id":"PasswordHashOverview","properties":{"count":{"description":"Number of passwords decoded.","format":"int64","type":"integer"}},"type":"object"},"PasswordHashParams":{"id":"PasswordHashParams","properties":{"severity-filter":{"items":{"$ref":"PasswordHashSeverityFilter"},"type":"array"},"sort-by":{"$ref":"PasswordHashSortBy"},"sort-ord":{"$ref":"SortOrd"}},"type":"object"},"PasswordHashSeverityFilter":{"enum":["medium","high"],"id":"PasswordHashSeverityFilter","type":"string"},"PasswordHashSortBy":{"enum":["severity","username"],"id":"PasswordHashSortBy","type":"string"},"QueryName":{"description":"Query parameter names for analysis filter types.\n\nNOTE: serialization values *MUST* match serialization structure\nof filter fields in QueryParameter types.","enum":["license-filter"],"id":"QueryName","type":"string"},"Requirement":{"description":"Represents a requirement in the report","id":"Requirement","properties":{"advice":{"description":"Human readable hint explaining how to pass this requirement.\n\nIn the case of \"with-suggestion\" status,\nthis will be the advice for the original status.","type":"string"},"ai-suggestion":{"$ref":"AiResult"},"analyzer":{"$ref":"AnalyzerResult"},"description":{"description":"Description of the requirement.","type":"string"},"explanation":{"description":"Human readable explanation of the 
status of this requirement.\n\nIn the case of \"with-suggestion\" status,\nthis will be the explanation for the original status.","type":"string"},"id":{"$ref":"RequirementId"},"policy-ref":{"description":"Reference to the policy associated with the requirement.","type":"string"},"status":{"$ref":"RequirementStatus"},"user-overwrite":{"$ref":"UserResult"}},"type":"object"},"RequirementId":{"description":"Id of Requirement\n\nThis id will be used to communicate between backend and frontend the semantic\nmeaning of requirement, as well as for overwriting specific requirement status by user.","enum":["cve-exploits","password-strength","security-updates","update-notifications","access-control","unauthorized-access","data-encryption","data-integrity","data-collection","essential-availability","minimise-impact","attack-surfaces","attack-reduction","activity-monitoring","data-removal","vulns-documentation","vulns-security-updates","update-security-and-automation","security-testing-and-review","fixed-vulns-disclosure","vulns-coordinated-disclosure","vulns-reporting-contact","security-updates-dissemination"],"id":"RequirementId","type":"string"},"RequirementOverwrite":{"description":"User action on a CRA requirement — either a manual overwrite or an AI suggestion response.","id":"RequirementOverwrite"},"RequirementStatus":{"description":"Overall status of the requirement\ncomputed by taking into account all user interactions.","enum":["passed","failed","unknown","unknown-with-suggestion","not-applicable"],"id":"RequirementStatus","type":"string"},"RiskLevelCount":{"description":"Count all different risk levels of the 
analysis.","id":"RiskLevelCount","properties":{"critical":{"format":"int64","type":"integer"},"high":{"format":"int64","type":"integer"},"low":{"format":"int64","type":"integer"},"medium":{"format":"int64","type":"integer"},"none":{"format":"int64","type":"integer"},"unknown":{"format":"int64","type":"integer"}},"type":"object"},"SbomParams":{"id":"SbomParams","properties":{"license-filter":{"items":{"type":"string"},"type":"array"},"search":{"type":"string"},"sort-by":{"$ref":"SbomSortBy"},"sort-ord":{"$ref":"SortOrd"},"type-filter":{"items":{"$ref":"ComponentType"},"type":"array"}},"type":"object"},"SbomSortBy":{"enum":["name"],"id":"SbomSortBy","type":"string"},"Scan":{"description":"Represents a scan that aggregates multiple analyses executed on a particular image.","id":"Scan","properties":{"analysis":{"description":"All analyses processed as part of this scan.","items":{"$ref":"AnalysisState"},"type":"array"},"created":{"description":"The date and time when the scan was initiated.","format":"date-time","type":"string"},"id":{"$ref":"ScanId"},"image":{"$ref":"Image"},"image_type":{"$ref":"ImageType"},"info":{"$ref":"Info"},"score":{"$ref":"ScanScore"}},"type":"object"},"ScanId":{"description":"A wrapper struct `ScanId` around a UUID.\n ID in the scans table.","format":"uuid","id":"ScanId","type":"string"},"ScanOverview":{"description":"Response object for `/scans/:id/overview` endpoint.\n\nSee [module's](super) documentation for more information\nabout schema and computation 
logic.","id":"ScanOverview","properties":{"capabilities":{"$ref":"CapabilitiesOverview"},"crypto":{"$ref":"CryptoOverview"},"cve":{"$ref":"CveOverview"},"hardening":{"$ref":"HardeningOverview"},"info":{"$ref":"InfoOverview"},"kernel":{"$ref":"KernelOverview"},"malware":{"$ref":"MalwareOverview"},"password-hash":{"$ref":"PasswordHashOverview"},"software-bom":{"$ref":"SoftwareBOMOverview"},"stack-overflow":{"$ref":"StackOverflowOverview"},"symbols":{"$ref":"SymbolsOverview"},"tasks":{"$ref":"TasksOverview"}},"type":"object"},"ScanScore":{"description":"The calculated score with a weighted algorithm over all analyses.","id":"ScanScore","properties":{"score":{"$ref":"Score"},"scores":{"description":"Individual analyses scores.","items":{"$ref":"AnalysisScore"},"type":"array"}},"type":"object"},"ScanStatus":{"description":"The status of a [`Scan`](analyzer_db::repository::scan::Scan)\nand all the [`Analysis`](analyzer_db::repository::analysis::Analysis).","id":"ScanStatus","properties":{"id":{"$ref":"ScanId"},"status":{"$ref":"AnalysisStatus"}},"type":"object"},"ScanType":{"description":"Represents a unified type for analyses across all supported images.","id":"ScanType"},"Score":{"description":"Represents a security impact score, ranging from 0 to 100.\n\nA higher value indicates a greater security impact.","format":"int32","id":"Score","type":"integer"},"Section":{"description":"Represents a group of requirements, grouped by [SubSection]s.","id":"Section","properties":{"label":{"description":"Name of the requirement","type":"string"},"policy-ref":{"description":"Reference to the policy associated with the requirement","type":"string"},"sub-sections":{"description":"List of sub-requirements or checks associated with this requirement","items":{"$ref":"SubSection"},"type":"array"}},"type":"object"},"SeverityFilter":{"enum":["none","low","medium","high","critical","unknown"],"id":"SeverityFilter","type":"string"},"SoftwareBOMOverview":{"description":"Overview for Software 
BOM analysis.","id":"SoftwareBOMOverview","properties":{"count":{"description":"Total number of software BOM entries.","format":"int64","type":"integer"},"licenses":{"description":"License type and their number of occurrences.","type":"object"}},"type":"object"},"SortOrd":{"enum":["asc","desc"],"id":"SortOrd","type":"string"},"StackOverflowOverview":{"description":"Overview for Stack Overflow analysis.","id":"StackOverflowOverview","properties":{"method":{"description":"Name of the protection method used,\nor `None` if stack overflow protection is not enabled.","type":"string"}},"type":"object"},"Status":{"description":"Status of the AI suggestions computation.","enum":["in-progress","finished"],"id":"Status","type":"string"},"StatusFilter":{"enum":["enabled","disabled"],"id":"StatusFilter","type":"string"},"SubSection":{"description":"Represents a group of requirements","id":"SubSection","properties":{"label":{"description":"Name of the requirement","type":"string"},"requirements":{"description":"List of sub-requirements or checks associated with this requirement","items":{"$ref":"Requirement"},"type":"array"}},"type":"object"},"SuggestionResponse":{"description":"User response to AI suggestion.","enum":["accepted","rejected"],"id":"SuggestionResponse","type":"string"},"SymbolsOverview":{"description":"Overview for Symbol analysis.","id":"SymbolsOverview","properties":{"count":{"description":"Number of analyzed symbols.","format":"int64","type":"integer"}},"type":"object"},"TasksOverview":{"description":"Overview for Task analysis.","id":"TasksOverview","properties":{"count":{"description":"Number of analysed tasks.","format":"int64","type":"integer"}},"type":"object"},"TypeFilter":{"enum":["sect","func","obj","file","notype"],"id":"TypeFilter","type":"string"},"UpdateObject":{"description":"The request to update fields on an [`Object`].","id":"UpdateObject","properties":{"description":{"description":"Description of the 
object.","type":"string"},"favorite":{"description":"Sets if the object is a favorite or not.","type":"boolean"},"name":{"description":"Name of the object.","type":"string"},"tags":{"description":"The tags associated with the object.","items":{"type":"string"},"type":"array"}},"type":"object"},"UserId":{"description":"A wrapper struct `UserId` around a UUID.\n ID in the users table.","format":"uuid","id":"UserId","type":"string"},"UserResult":{"id":"UserResult","properties":{"status":{"$ref":"UserStatus"}},"type":"object"},"UserStatus":{"description":"Represents the status of a requirement overwritten by the user","enum":["passed","failed"],"id":"UserStatus","type":"string"},"UserUploadedDocument":{"description":"Description of the user provided file\nused by ai to give its suggestion.","id":"UserUploadedDocument","properties":{"filename":{"description":"Name of the user uploaded file.","type":"string"}},"type":"object"}},"resources":{"api":{"resources":{"health":{"methods":{"list":{"id":"analyzer-api-routes.api.health.list","httpMethod":"GET","path":"api/health","description":"Returns if the service is in a healthy state.","response":{"$ref":"HealthStatus"}}}},"objects":{"methods":{"create":{"id":"analyzer-api-routes.api.objects.create","httpMethod":"POST","path":"api/objects","description":"Create new object","request":{"$ref":"CreateObject"}},"delete":{"id":"analyzer-api-routes.api.objects.delete","httpMethod":"DELETE","path":"api/objects/{id}","description":"Deletes an object and all related scans.","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Unique identifier of the object to delete"}},"parameterOrder":["id"]},"get":{"id":"analyzer-api-routes.api.objects.get","httpMethod":"GET","path":"api/objects/{id}","description":"Retrieve an object by its ID.","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Object 
ID"}},"parameterOrder":["id"]},"list":{"id":"analyzer-api-routes.api.objects.list","httpMethod":"GET","path":"api/objects","description":"Retrieve a list of all objects of the current user.","parameters":{"end_timestamp":{"type":"string","required":false,"location":"query","description":"End timestamp for pagination.","format":"datetime"},"id":{"type":"string","required":false,"location":"query","description":"Pagination cursor (UUID).","format":"uuid"},"limit":{"type":"integer","required":false,"location":"query","description":"Maximum number of items per page.","format":"int32"},"start_timestamp":{"type":"string","required":false,"location":"query","description":"Start timestamp for pagination.","format":"datetime"}}},"update":{"id":"analyzer-api-routes.api.objects.update","httpMethod":"PUT","path":"api/objects/{id}","description":"Update an object","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Object ID"}},"parameterOrder":["id"],"request":{"$ref":"UpdateObject"}}},"resources":{"scans":{"methods":{"list":{"id":"analyzer-api-routes.api.objects.scans.list","httpMethod":"GET","path":"api/objects/{id}/scans","description":"Retrieve a list of scans associated with an object.","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Object ID"}},"parameterOrder":["id"]}}}}},"scans":{"methods":{"create":{"id":"analyzer-api-routes.api.scans.create","httpMethod":"POST","path":"api/scans","description":"Schedule a new scan.","response":{"$ref":"NewScanResponse"}},"delete":{"id":"analyzer-api-routes.api.scans.delete","httpMethod":"DELETE","path":"api/scans/{id}","description":"Delete a scan.","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Scan ID"}},"parameterOrder":["id"]},"get":{"id":"analyzer-api-routes.api.scans.get","httpMethod":"GET","path":"api/scans/{id}","description":"Returns a scan.","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Scan 
ID"}},"parameterOrder":["id"],"response":{"$ref":"Scan"}},"list":{"id":"analyzer-api-routes.api.scans.list","httpMethod":"GET","path":"api/scans","description":"Retrieve a list of scans."}},"resources":{"cancel":{"methods":{"create":{"id":"analyzer-api-routes.api.scans.cancel.create","httpMethod":"POST","path":"api/scans/{id}/cancel","description":"This can be used to cancel an already pending or running scan.","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Scan ID"}},"parameterOrder":["id"]}}},"compliance-check":{"resources":{"cyber-resilience-act":{"methods":{"list":{"id":"analyzer-api-routes.api.scans.compliance-check.cyber-resilience-act.list","httpMethod":"GET","path":"api/scans/{id}/compliance-check/cyber-resilience-act","description":"Computes compliance with Cyber Resilience Act","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Scan ID","format":"uuid"}},"parameterOrder":["id"],"response":{"$ref":"CyberResilienceActReport"}}},"resources":{"ai-suggestion":{"resources":{"begin":{"methods":{"create":{"id":"analyzer-api-routes.api.scans.compliance-check.cyber-resilience-act.ai-suggestion.begin.create","httpMethod":"POST","path":"api/scans/{id}/compliance-check/cyber-resilience-act/ai-suggestion/begin","description":"Triggers CRA AI suggestion using user-provided documents.","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Scan ID","format":"uuid"}},"parameterOrder":["id"]}}},"status":{"methods":{"list":{"id":"analyzer-api-routes.api.scans.compliance-check.cyber-resilience-act.ai-suggestion.status.list","httpMethod":"GET","path":"api/scans/{id}/compliance-check/cyber-resilience-act/ai-suggestion/status","description":"Returns status of the CRA AI suggestion.","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Scan 
ID","format":"uuid"}},"parameterOrder":["id"],"response":{"$ref":"AiSuggestionStatus"}}}}}},"overwrite":{"methods":{"overwrite_compliance_check_requirement":{"id":"analyzer-api-routes.api.scans.compliance-check.cyber-resilience-act.overwrite.overwrite_compliance_check_requirement","httpMethod":"PUT","path":"api/scans/{id}/compliance-check/cyber-resilience-act/overwrite","description":"Overwrites compliance check requirement","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Scan ID","format":"uuid"}},"parameterOrder":["id"],"request":{"$ref":"RequirementOverwrite"}}}},"report":{"methods":{"list":{"id":"analyzer-api-routes.api.scans.compliance-check.cyber-resilience-act.report.list","httpMethod":"GET","path":"api/scans/{id}/compliance-check/cyber-resilience-act/report","description":"Downloads Cyber Resilience Act compliance report as PDF","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Scan ID","format":"uuid"}},"parameterOrder":["id"]}}}}}}},"documents":{"methods":{"create":{"id":"analyzer-api-routes.api.scans.documents.create","httpMethod":"POST","path":"api/scans/{id}/documents","description":"Upload a document for a scan.","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Scan ID"}},"parameterOrder":["id"],"response":{"$ref":"DocumentUploadResponse"}},"delete":{"id":"analyzer-api-routes.api.scans.documents.delete","httpMethod":"DELETE","path":"api/scans/{id}/documents/{file_name}","description":"Delete a single document for a scan.","parameters":{"file_name":{"type":"string","required":true,"location":"path","description":"Document file name"},"id":{"type":"string","required":true,"location":"path","description":"Scan ID"}},"parameterOrder":["id","file_name"]},"delete_documents":{"id":"analyzer-api-routes.api.scans.documents.delete_documents","httpMethod":"DELETE","path":"api/scans/{id}/documents","description":"Delete all documents for a 
scan.","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Scan ID"}},"parameterOrder":["id"]},"list":{"id":"analyzer-api-routes.api.scans.documents.list","httpMethod":"GET","path":"api/scans/{id}/documents","description":"List documents for a scan.","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Scan ID"}},"parameterOrder":["id"],"response":{"$ref":"DocumentListResponse"}}}},"overview":{"methods":{"get":{"id":"analyzer-api-routes.api.scans.overview.get","httpMethod":"GET","path":"api/scans/{scan_id}/overview/{analysis_id}","description":"Returns an overview of one analysis.","parameters":{"analysis_id":{"type":"string","required":true,"location":"path","description":"Analysis ID"},"scan_id":{"type":"string","required":true,"location":"path","description":"Scan ID"}},"parameterOrder":["scan_id","analysis_id"],"response":{"$ref":"AnalysisOverview"}},"list":{"id":"analyzer-api-routes.api.scans.overview.list","httpMethod":"GET","path":"api/scans/{id}/overview","description":"Returns an aggregated overview of all analysis executed for one scan.","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Scan ID"}},"parameterOrder":["id"],"response":{"$ref":"ScanOverview"}}}},"report":{"methods":{"list":{"id":"analyzer-api-routes.api.scans.report.list","httpMethod":"GET","path":"api/scans/{id}/report","description":"Downloads a PDF security report for a scan.","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Scan ID","format":"uuid"}},"parameterOrder":["id"]}}},"results":{"methods":{"get":{"id":"analyzer-api-routes.api.scans.results.get","httpMethod":"GET","path":"api/scans/{scan_id}/results/{analysis_id}","description":"Retrieve the results of one specific analysis of a scan.","parameters":{"analysis_id":{"type":"string","required":true,"location":"path","description":"Analysis 
ID"},"page":{"type":"integer","required":false,"location":"query","description":"Page number (must be > 0). If provided, `per-page` must also be provided.","format":"int32"},"per-page":{"type":"integer","required":false,"location":"query","description":"Items per page (must be > 0). If provided, `page` must also be provided.","format":"int32"},"query":{"type":"string","required":true,"location":"query","description":"Query parameters depend on the analysis type. Supported shapes: IDF task, other analysis types."},"scan_id":{"type":"string","required":true,"location":"path","description":"Scan ID"}},"parameterOrder":["scan_id","analysis_id"],"response":{"$ref":"AnalysisResultDTO"}}}},"sbom":{"methods":{"list":{"id":"analyzer-api-routes.api.scans.sbom.list","httpMethod":"GET","path":"api/scans/{id}/sbom","description":"Downloads the SBOM (CycloneDX JSON) for a scan.","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Scan ID","format":"uuid"}},"parameterOrder":["id"]}}},"score":{"methods":{"list":{"id":"analyzer-api-routes.api.scans.score.list","httpMethod":"GET","path":"api/scans/{id}/score","description":"Returns a security score of all successful finished analyses with their individual scores included.","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Scan ID"}},"parameterOrder":["id"],"response":{"$ref":"ScanScore"}}}},"status":{"methods":{"list":{"id":"analyzer-api-routes.api.scans.status.list","httpMethod":"GET","path":"api/scans/{id}/status","description":"Returns the status of a scan.","parameters":{"id":{"type":"string","required":true,"location":"path","description":"Scan ID"}},"parameterOrder":["id"],"response":{"$ref":"ScanStatus"}}}},"types":{"methods":{"list":{"id":"analyzer-api-routes.api.scans.types.list","httpMethod":"GET","path":"api/scans/types","description":"Returns a list of all available analysis types for each different image."}}}}}}}}} \ No newline at end of file +{ + 
"kind": "discovery#restDescription", + "discoveryVersion": "v1", + "id": "analyzer-api-routes:0.5.0", + "name": "analyzer-api-routes", + "version": "0.5.0", + "title": "Analyzer API routes", + "description": "", + "protocol": "rest", + "rootUrl": "", + "servicePath": "", + "schemas": { + "AiResult": { + "id": "AiResult", + "properties": { + "reasoning": { + "description": "AI reasoning, which led to the current status", + "type": "string" + }, + "sources": { + "description": "List of documents used by AI to produce current status.", + "items": { "$ref": "UserUploadedDocument" }, + "type": "array" + }, + "status": { "$ref": "AiStatus" }, + "user-action": { "$ref": "SuggestionResponse" } + }, + "type": "object" + }, + "AiStatus": { + "description": "Represents the status of a requirement determined by ai", + "enum": ["passed", "failed", "unknown"], + "id": "AiStatus", + "type": "string" + }, + "AiSuggestionStatus": { + "description": "Status of the AI suggestions computation.", + "id": "AiSuggestionStatus", + "properties": { "status": { "$ref": "Status" } }, + "type": "object" + }, + "AnalysisFilter": { + "id": "AnalysisFilter", + "properties": { + "query-name": { "$ref": "QueryName" }, + "values": { + "description": "Available filter values with their count.", + "items": { "$ref": "FilterValue" }, + "type": "array" + } + }, + "type": "object" + }, + "AnalysisFindings": { + "description": "Wrapper type similar to AnalysisResult, but it contains only `findings`\nportion of analysis.", + "id": "AnalysisFindings" + }, + "AnalysisId": { + "description": "A wrapper struct `AnalysisId` around a UUID.\n ID in the analysis table.", + "format": "uuid", + "id": "AnalysisId", + "type": "string" + }, + "AnalysisInfo": { + "description": "Helper struct to define if an analysis should be by default enabled", + "id": "AnalysisInfo", + "properties": { "default": { "type": "boolean" }, "type": { "type": "string" } }, + "type": "object" + }, + "AnalysisOverview": { + "description": "Like 
[`ScanOverview`] but for single analysis.", + "id": "AnalysisOverview" + }, + "AnalysisQueryUnion": { + "description": "Union of all available query parameters for analyses.", + "id": "AnalysisQueryUnion" + }, + "AnalysisResultDTO": { + "description": "AnalysisResult but with count of all findings,\nbefore pagination was applied.", + "id": "AnalysisResultDTO", + "properties": { + "filters": { + "description": "Filters that can be used in this analysis.", + "type": "object" + }, + "findings": { "$ref": "AnalysisFindings" }, + "total-findings": { + "description": "Total count of findings _after_ filtering, but _before_ pagination.", + "format": "int64", + "type": "integer" + } + }, + "type": "object" + }, + "AnalysisScore": { + "description": "The score of an analysis.", + "id": "AnalysisScore", + "properties": { + "id": { "$ref": "AnalysisId" }, + "score": { "$ref": "Score" }, + "type": { "$ref": "AnalysisType" } + }, + "type": "object" + }, + "AnalysisState": { + "description": "An analysis that runs for one particular system image.", + "id": "AnalysisState", + "properties": { + "id": { "$ref": "AnalysisId" }, + "status": { "$ref": "AnalysisStatus" }, + "type": { "$ref": "ScanType" } + }, + "type": "object" + }, + "AnalysisStatus": { + "description": "Represents the current execution status of an analysis task.", + "enum": ["success", "pending", "in-progress", "canceled", "error"], + "id": "AnalysisStatus", + "type": "string" + }, + "AnalysisType": { + "description": "Type of the analysis", + "enum": [ + "info", + "kernel", + "cve", + "password-hash", + "hardening", + "malware", + "software-bom", + "crypto", + "capabilities", + "symbols", + "tasks", + "stack-overflow" + ], + "id": "AnalysisType", + "type": "string" + }, + "AnalyzerResult": { + "id": "AnalyzerResult", + "properties": { "status": { "$ref": "AnalyzerStatus" } }, + "type": "object" + }, + "AnalyzerStatus": { + "description": "Represents the status of a requirement determined by analyzer", + "enum": 
["passed", "failed", "unknown", "not-applicable"], + "id": "AnalyzerStatus", + "type": "string" + }, + "ApiScanType": { + "description": "List of available analysis types per image type.\n\nThis includes the information if a analysis type should be scheduled by default or not.\n\n# Note\n\nThis is used by the frontend to determine which analysis has to be scheduled implicitly\nand which types are optional.", + "id": "ApiScanType" + }, + "BindFilter": { "enum": ["local", "global", "weak"], "id": "BindFilter", "type": "string" }, + "CapabilitiesOverview": { + "description": "Overview for Capability analysis.", + "id": "CapabilitiesOverview", + "properties": { + "capabilities": { + "description": "Capability found and their number of occurrences.", + "type": "object" + }, + "counts": { "$ref": "RiskLevelCount" }, + "executable_count": { + "description": "Total number executables.", + "format": "int64", + "type": "integer" + } + }, + "type": "object" + }, + "CapabilityParams": { + "id": "CapabilityParams", + "properties": { + "search": { "type": "string" }, + "severity-filter": { "items": { "$ref": "SeverityFilter" }, "type": "array" }, + "sort-by": { "$ref": "CapabilitySortBy" }, + "sort-ord": { "$ref": "SortOrd" } + }, + "type": "object" + }, + "CapabilitySortBy": { "enum": ["severity"], "id": "CapabilitySortBy", "type": "string" }, + "Checks": { + "description": "Represents the checks performed in the report", + "id": "Checks", + "properties": { + "failed": { + "description": "Number of checks that failed (determined either by analyzer or overwritten by the user)", + "format": "int32", + "type": "integer" + }, + "not-applicable": { + "description": "Number of not applicable requirements", + "format": "int32", + "type": "integer" + }, + "passed": { + "description": "Number of checks that passed (determined either by analyzer or overwritten by the user)", + "format": "int32", + "type": "integer" + }, + "suggestion-available": { + "description": "Number of checks for 
which AI suggestion is available.\n\nIt does not include user accepted or rejected suggestions.", + "format": "int32", + "type": "integer" + }, + "total": { + "description": "Total number of checks performed", + "format": "int32", + "type": "integer" + }, + "unknown": { + "description": "Number of checks that analyzer was unable to determine\n(or ai didn't give conclusive suggestion).\n\nNote that this will also include those requirements,\nthat have ai suggestion available, but user has not approved or rejected it yet.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "ComponentType": { + "enum": [ + "application", + "framework", + "library", + "container", + "operating-system", + "device", + "firmware", + "file" + ], + "id": "ComponentType", + "type": "string" + }, + "CreateObject": { + "description": "The request to create a new object.", + "id": "CreateObject", + "properties": { + "description": { "description": "Description of the object.", "type": "string" }, + "name": { "description": "Name of the object.", "type": "string" }, + "tags": { + "description": "Tags associated with the object.", + "items": { "type": "string" }, + "type": "array" + } + }, + "type": "object" + }, + "CryptoOverview": { + "description": "Overview for Crypto analysis.", + "id": "CryptoOverview", + "properties": { + "certificates": { + "description": "Number of certificates found.", + "format": "int64", + "type": "integer" + }, + "private_keys": { + "description": "Number of private keys found.", + "format": "int64", + "type": "integer" + }, + "public_keys": { + "description": "Number of public keys found.", + "format": "int64", + "type": "integer" + } + }, + "type": "object" + }, + "CryptoParams": { + "id": "CryptoParams", + "properties": { + "search": { "type": "string" }, + "sort-by": { "$ref": "CryptoSortBy" }, + "sort-ord": { "$ref": "SortOrd" }, + "type-filter": { "items": { "$ref": "CryptoTypeFilter" }, "type": "array" } + }, + "type": "object" + }, + 
"CryptoSortBy": { + "enum": ["type", "key-size", "filename", "path", "issuer"], + "id": "CryptoSortBy", + "type": "string" + }, + "CryptoTypeFilter": { + "enum": ["certificate", "private-key", "public-key"], + "id": "CryptoTypeFilter", + "type": "string" + }, + "CveOverview": { + "description": "Overview for Cve analysis.", + "id": "CveOverview", + "properties": { + "counts": { "$ref": "CveSeverityCount" }, + "products": { + "description": "Cve counts for each \"product\" (binary, library, etc.).", + "type": "object" + }, + "total": { "description": "Sum of all `counts`.", "format": "int64", "type": "integer" } + }, + "type": "object" + }, + "CveParams": { + "id": "CveParams", + "properties": { + "patch-filter": { "items": { "$ref": "CvePatchFilter" }, "type": "array" }, + "search": { "type": "string" }, + "severity-filter": { "items": { "$ref": "CveSeverityFilter" }, "type": "array" }, + "sort-by": { "$ref": "CveSortBy" }, + "sort-ord": { "$ref": "SortOrd" } + }, + "type": "object" + }, + "CvePatchFilter": { + "enum": ["available", "unavailable"], + "id": "CvePatchFilter", + "type": "string" + }, + "CveSeverityCount": { + "description": "Maps CVE severity to its count", + "id": "CveSeverityCount", + "properties": { + "critical": { "format": "int64", "type": "integer" }, + "high": { "format": "int64", "type": "integer" }, + "low": { "format": "int64", "type": "integer" }, + "medium": { "format": "int64", "type": "integer" }, + "unknown": { "format": "int64", "type": "integer" } + }, + "type": "object" + }, + "CveSeverityFilter": { + "enum": ["low", "medium", "high", "critical"], + "id": "CveSeverityFilter", + "type": "string" + }, + "CveSortBy": { "enum": ["severity"], "id": "CveSortBy", "type": "string" }, + "CyberResilienceActReport": { + "description": "Represents a Cyber Resilience Act report", + "id": "CyberResilienceActReport", + "properties": { + "checks": { "$ref": "Checks" }, + "created-at": { + "description": "Date and time when the report was created.", 
+ "format": "date-time", + "type": "string" + }, + "name": { "description": "Name of the report.", "type": "string" }, + "sections": { + "description": "List of categories in the report.", + "items": { "$ref": "Section" }, + "type": "array" + }, + "updated-at": { + "description": "Date and time of last report update.\n\nIf no update has happened yet, for example after report was generated\nand before any user overwrite, this will be `null`.", + "format": "date-time", + "type": "string" + } + }, + "type": "object" + }, + "DockerAnalysis": { + "description": "Represents different types of analyses for Docker containers.", + "enum": [ + "info", + "cve", + "password-hash", + "crypto", + "software-bom", + "malware", + "hardening", + "capabilities" + ], + "id": "DockerAnalysis", + "type": "string" + }, + "DockerInfo": { + "description": "Container metadata information\n\nRepresents various metadata attributes of a container image", + "id": "DockerInfo", + "properties": { + "arch": { + "description": "List of supported CPU architectures for the container", + "items": { "type": "string" }, + "type": "array" + }, + "ctime": { + "description": "List of creation timestamps for container layers", + "items": { "type": "string" }, + "type": "array" + }, + "env": { + "description": "List of environment variables defined in the container", + "items": { "type": "string" }, + "type": "array" + }, + "history": { + "description": "List of commands used to build the container layers", + "items": { "$ref": "History" }, + "type": "array" + }, + "os": { + "description": "List of supported operating systems for the container", + "items": { "type": "string" }, + "type": "array" + }, + "os_name": { + "description": "Name of the base operating system used in the container", + "type": "string" + }, + "os_version": { + "description": "Version of the base operating system used in the container", + "type": "string" + }, + "tags": { + "description": "List of container image tags associated with 
the image", + "items": { "type": "string" }, + "type": "array" + } + }, + "type": "object" + }, + "DockerInfoResult": { "description": "Info result for docker image", "id": "DockerInfoResult" }, + "DocumentListItem": { + "description": "A single document entry in a listing.", + "id": "DocumentListItem", + "properties": { + "file-name": { + "description": "Original file name, serves as the unique key within a scan's document storage", + "type": "string" + } + }, + "type": "object" + }, + "DocumentListResponse": { + "description": "A list of documents associated with a scan.", + "id": "DocumentListResponse", + "properties": { "documents": { "items": { "$ref": "DocumentListItem" }, "type": "array" } }, + "type": "object" + }, + "DocumentUploadResponse": { + "description": "The response after successfully uploading a document.", + "id": "DocumentUploadResponse", + "properties": { + "file-name": { + "description": "Original file name, serves as the unique key within a scan's document storage", + "type": "string" + } + }, + "type": "object" + }, + "FeaturesFilter": { + "enum": [ + "seccomp", + "seccomp-filter", + "security-network", + "stack-protector", + "fortify-source", + "vmap-kernel-stack", + "usercopy", + "heap-freelist-obfuscation", + "executable-memory-protection", + "kaslr", + "apparmor", + "selinux", + "smack", + "tomoyo", + "yama" + ], + "id": "FeaturesFilter", + "type": "string" + }, + "FilterValue": { + "id": "FilterValue", + "properties": { + "count": { + "description": "Count of findings matching this value for current filter options.", + "format": "int64", + "type": "integer" + }, + "value": { + "description": "Filter value that can be passed in query paramters.", + "type": "string" + } + }, + "type": "object" + }, + "HardeningOverview": { + "description": "Overview for Hardening analysis.", + "id": "HardeningOverview", + "properties": { + "counts": { "$ref": "HardeningSeverityCount" }, + "total": { "description": "Sum of all `counts`.", "format": 
"int64", "type": "integer" } + }, + "type": "object" + }, + "HardeningParams": { + "id": "HardeningParams", + "properties": { + "search": { "type": "string" }, + "severity-filter": { "items": { "$ref": "HardeningSeverityFilter" }, "type": "array" }, + "sort-by": { "$ref": "HardeningSortBy" }, + "sort-ord": { "$ref": "SortOrd" } + }, + "type": "object" + }, + "HardeningSeverityCount": { + "description": "Maps Hardening severity to its count", + "id": "HardeningSeverityCount", + "properties": { + "high": { "format": "int64", "type": "integer" }, + "low": { "format": "int64", "type": "integer" }, + "medium": { "format": "int64", "type": "integer" } + }, + "type": "object" + }, + "HardeningSeverityFilter": { + "enum": ["low", "medium", "high"], + "id": "HardeningSeverityFilter", + "type": "string" + }, + "HardeningSortBy": { + "enum": ["severity", "filename", "canary", "nx", "pie", "relro", "fortify"], + "id": "HardeningSortBy", + "type": "string" + }, + "HealthStatus": { + "description": "Health status of an application.\n\nIt contains an overall `healthy` field but can also provide\nthe status of individual components or an error message.\nIf the status is not healthy a Http status code of 500 will be returned.", + "id": "HealthStatus", + "properties": { "healthy": { "type": "boolean" }, "message": { "type": "string" } }, + "type": "object" + }, + "History": { + "id": "History", + "properties": { + "created": { "format": "date-time", "type": "string" }, + "created_by": { "type": "string" }, + "empty_layer": { "type": "boolean" } + }, + "type": "object" + }, + "IdfAnalysis": { + "description": "Represents analyses specific to IDF (IoT Device Framework) targets.", + "enum": ["info", "cve", "software-bom", "symbols", "tasks", "stack-overflow"], + "id": "IdfAnalysis", + "type": "string" + }, + "IdfInfo": { + "description": "IdfInfo analysis entry for idf image", + "id": "IdfInfo", + "properties": { + "arch": { "description": "Architecture type", "type": "string" }, + 
"compiler": { + "description": "Compiler name and version used to create this image", + "type": "string" + }, + "freertos": { "description": "freertos version", "type": "string" }, + "idf": { "description": "idf version", "type": "string" } + }, + "type": "object" + }, + "IdfInfoResult": { "description": "Info result for idf image", "id": "IdfInfoResult" }, + "IdfSymbolParams": { + "id": "IdfSymbolParams", + "properties": { + "bind-filter": { "items": { "$ref": "BindFilter" }, "type": "array" }, + "search": { "type": "string" }, + "sort-by": { "$ref": "IdfSymbolSortBy" }, + "sort-ord": { "$ref": "SortOrd" }, + "type-filter": { "items": { "$ref": "TypeFilter" }, "type": "array" } + }, + "type": "object" + }, + "IdfSymbolSortBy": { "enum": ["name"], "id": "IdfSymbolSortBy", "type": "string" }, + "IdfTaskParams": { + "id": "IdfTaskParams", + "properties": { + "search": { "type": "string" }, + "sort-by": { "$ref": "IdfTaskSortBy" }, + "sort-ord": { "$ref": "SortOrd" } + }, + "type": "object" + }, + "IdfTaskSortBy": { "enum": ["function", "name"], "id": "IdfTaskSortBy", "type": "string" }, + "Image": { + "description": "A image on which a scan is executed", + "id": "Image", + "properties": { + "file_name": { + "description": "The original name of the file as provided when the image was uploaded.\nThis is typically used for display or reference purposes and may not be unique.", + "type": "string" + }, + "id": { "$ref": "ImageId" } + }, + "type": "object" + }, + "ImageId": { + "description": "A wrapper struct `ImageId` around a UUID.\n ID in the images table.", + "format": "uuid", + "id": "ImageId", + "type": "string" + }, + "ImageType": { + "description": "Type of the image used in scan", + "enum": ["linux", "docker", "idf"], + "id": "ImageType", + "type": "string" + }, + "Info": { "id": "Info" }, + "InfoOverview": { "id": "InfoOverview" }, + "KernelOverview": { + "description": "Overview for Kernel analysis.", + "id": "KernelOverview", + "properties": { + "count": { + 
"description": "Number of kernel security features enabled.", + "format": "int64", + "type": "integer" + } + }, + "type": "object" + }, + "KernelParams": { + "id": "KernelParams", + "properties": { + "features-filter": { "items": { "$ref": "FeaturesFilter" }, "type": "array" }, + "sort-by": { "$ref": "KernelSortBy" }, + "sort-ord": { "$ref": "SortOrd" }, + "status-filter": { "items": { "$ref": "StatusFilter" }, "type": "array" } + }, + "type": "object" + }, + "KernelSortBy": { "enum": ["features", "status"], "id": "KernelSortBy", "type": "string" }, + "LinuxAnalysis": { + "description": "Represents different types of analyses that can be performed on a Linux system.", + "enum": [ + "info", + "kernel", + "cve", + "password-hash", + "crypto", + "software-bom", + "malware", + "hardening", + "capabilities" + ], + "id": "LinuxAnalysis", + "type": "string" + }, + "LinuxInfo": { + "description": "Represents the information about the system", + "id": "LinuxInfo", + "properties": { + "arch": { "description": "The tags associated with the system", "type": "string" }, + "banner": { "description": "The operating system name", "type": "string" }, + "kernel_version": { "description": "The kernel version", "type": "string" }, + "libc": { "description": "The operating system version", "type": "string" } + }, + "type": "object" + }, + "LinuxInfoResult": { "description": "Info result for linux image", "id": "LinuxInfoResult" }, + "MalwareOverview": { + "description": "Overview for Malware analysis.", + "id": "MalwareOverview", + "properties": { + "count": { + "description": "Number of malware detected.", + "format": "int64", + "type": "integer" + } + }, + "type": "object" + }, + "MalwareParams": { + "id": "MalwareParams", + "properties": { "sort-by": { "$ref": "MalwareSortBy" }, "sort-ord": { "$ref": "SortOrd" } }, + "type": "object" + }, + "MalwareSortBy": { "enum": ["filename"], "id": "MalwareSortBy", "type": "string" }, + "NewScanResponse": { + "description": "The response if a 
new scan is created.", + "id": "NewScanResponse", + "properties": { "id": { "$ref": "ScanId" } }, + "type": "object" + }, + "ObjectId": { + "description": "A wrapper struct `ObjectId` around a UUID.\n ID in the objects table.", + "format": "uuid", + "id": "ObjectId", + "type": "string" + }, + "PasswordHashOverview": { + "description": "Overview for Password Hash analysis.", + "id": "PasswordHashOverview", + "properties": { + "count": { + "description": "Number of passwords decoded.", + "format": "int64", + "type": "integer" + } + }, + "type": "object" + }, + "PasswordHashParams": { + "id": "PasswordHashParams", + "properties": { + "severity-filter": { "items": { "$ref": "PasswordHashSeverityFilter" }, "type": "array" }, + "sort-by": { "$ref": "PasswordHashSortBy" }, + "sort-ord": { "$ref": "SortOrd" } + }, + "type": "object" + }, + "PasswordHashSeverityFilter": { + "enum": ["medium", "high"], + "id": "PasswordHashSeverityFilter", + "type": "string" + }, + "PasswordHashSortBy": { + "enum": ["severity", "username"], + "id": "PasswordHashSortBy", + "type": "string" + }, + "QueryName": { + "description": "Query parameter names for analysis filter types.\n\nNOTE: serialization values *MUST* match serialization structure\nof filter fields in QueryParameter types.", + "enum": ["license-filter"], + "id": "QueryName", + "type": "string" + }, + "Requirement": { + "description": "Represents a requirement in the report", + "id": "Requirement", + "properties": { + "advice": { + "description": "Human readable hint explaining how to pass this requirement.\n\nIn the case of \"with-suggestion\" status,\nthis will be the advice for the original status.", + "type": "string" + }, + "ai-suggestion": { "$ref": "AiResult" }, + "analyzer": { "$ref": "AnalyzerResult" }, + "description": { "description": "Description of the requirement.", "type": "string" }, + "explanation": { + "description": "Human readable explanation of the status of this requirement.\n\nIn the case of 
\"with-suggestion\" status,\nthis will be the explanation for the original status.", + "type": "string" + }, + "id": { "$ref": "RequirementId" }, + "policy-ref": { + "description": "Reference to the policy associated with the requirement.", + "type": "string" + }, + "status": { "$ref": "RequirementStatus" }, + "user-overwrite": { "$ref": "UserResult" } + }, + "type": "object" + }, + "RequirementId": { + "description": "Id of Requirement\n\nThis id will be used to communicate between backend and fronted the semantic\nmeaning of requirement, as well as for overwriting specific requirement status by user.", + "enum": [ + "cve-exploits", + "password-strength", + "security-updates", + "update-notifications", + "access-control", + "unauthorized-access", + "data-encryption", + "data-integrity", + "data-collection", + "essential-availability", + "minimise-impact", + "attack-surfaces", + "attack-reduction", + "activity-monitoring", + "data-removal", + "vulns-documentation", + "vulns-security-updates", + "update-security-and-automation", + "security-testing-and-review", + "fixed-vulns-disclosure", + "vulns-coordinated-disclosure", + "vulns-reporting-contact", + "security-updates-dissemination" + ], + "id": "RequirementId", + "type": "string" + }, + "RequirementOverwrite": { + "description": "User action on a CRA requirement — either a manual overwrite or an AI suggestion response.", + "id": "RequirementOverwrite" + }, + "RequirementStatus": { + "description": "Overall status of the requirement\ncomputed by taking into account all user interactions.", + "enum": ["passed", "failed", "unknown", "unknown-with-suggestion", "not-applicable"], + "id": "RequirementStatus", + "type": "string" + }, + "RiskLevelCount": { + "description": "Count all different risk levels of the analysis.", + "id": "RiskLevelCount", + "properties": { + "critical": { "format": "int64", "type": "integer" }, + "high": { "format": "int64", "type": "integer" }, + "low": { "format": "int64", "type": "integer" 
}, + "medium": { "format": "int64", "type": "integer" }, + "none": { "format": "int64", "type": "integer" }, + "unknown": { "format": "int64", "type": "integer" } + }, + "type": "object" + }, + "SbomParams": { + "id": "SbomParams", + "properties": { + "license-filter": { "items": { "type": "string" }, "type": "array" }, + "search": { "type": "string" }, + "sort-by": { "$ref": "SbomSortBy" }, + "sort-ord": { "$ref": "SortOrd" }, + "type-filter": { "items": { "$ref": "ComponentType" }, "type": "array" } + }, + "type": "object" + }, + "SbomSortBy": { "enum": ["name"], "id": "SbomSortBy", "type": "string" }, + "Scan": { + "description": "Represents a scan that aggregates multiple analyses executed on a particular image.", + "id": "Scan", + "properties": { + "analysis": { + "description": "All analyses processed as part of this scan.", + "items": { "$ref": "AnalysisState" }, + "type": "array" + }, + "created": { + "description": "The date and time when the scan was initiated.", + "format": "date-time", + "type": "string" + }, + "id": { "$ref": "ScanId" }, + "image": { "$ref": "Image" }, + "image_type": { "$ref": "ImageType" }, + "info": { "$ref": "Info" }, + "score": { "$ref": "ScanScore" } + }, + "type": "object" + }, + "ScanId": { + "description": "A wrapper struct `ScanId` around a UUID.\n ID in the scans table.", + "format": "uuid", + "id": "ScanId", + "type": "string" + }, + "ScanOverview": { + "description": "Response object for `/scans/:id/overview` endpoint.\n\nSee [module's](super) documentation for more information\nabout schema and computation logic.", + "id": "ScanOverview", + "properties": { + "capabilities": { "$ref": "CapabilitiesOverview" }, + "crypto": { "$ref": "CryptoOverview" }, + "cve": { "$ref": "CveOverview" }, + "hardening": { "$ref": "HardeningOverview" }, + "info": { "$ref": "InfoOverview" }, + "kernel": { "$ref": "KernelOverview" }, + "malware": { "$ref": "MalwareOverview" }, + "password-hash": { "$ref": "PasswordHashOverview" }, + 
"software-bom": { "$ref": "SoftwareBOMOverview" }, + "stack-overflow": { "$ref": "StackOverflowOverview" }, + "symbols": { "$ref": "SymbolsOverview" }, + "tasks": { "$ref": "TasksOverview" } + }, + "type": "object" + }, + "ScanScore": { + "description": "The calculate score with an weighted algorithm over all analysis.", + "id": "ScanScore", + "properties": { + "score": { "$ref": "Score" }, + "scores": { + "description": "Individual analyses scores.", + "items": { "$ref": "AnalysisScore" }, + "type": "array" + } + }, + "type": "object" + }, + "ScanStatus": { + "description": "The status of a [`Scan`](analyzer_db::repository::scan::Scan)\nand all the [`Analysis`](analyzer_db::repository::analysis::Analysis).", + "id": "ScanStatus", + "properties": { "id": { "$ref": "ScanId" }, "status": { "$ref": "AnalysisStatus" } }, + "type": "object" + }, + "ScanType": { + "description": "Represents a unified type for analyses across all supported images.", + "id": "ScanType" + }, + "Score": { + "description": "Represents a security impact score, ranging from 0 to 100.\n\nA higher value indicates a greater security impact.", + "format": "int32", + "id": "Score", + "type": "integer" + }, + "Section": { + "description": "Represents a group of requirements, grouped by [SubSection]s.", + "id": "Section", + "properties": { + "label": { "description": "Name of the requirement", "type": "string" }, + "policy-ref": { + "description": "Reference to the policy associated with the requirement", + "type": "string" + }, + "sub-sections": { + "description": "List of sub-requirements or checks associated with this requirement", + "items": { "$ref": "SubSection" }, + "type": "array" + } + }, + "type": "object" + }, + "SeverityFilter": { + "enum": ["none", "low", "medium", "high", "critical", "unknown"], + "id": "SeverityFilter", + "type": "string" + }, + "SoftwareBOMOverview": { + "description": "Overview for Software BOM analysis.", + "id": "SoftwareBOMOverview", + "properties": { + "count": { 
+ "description": "Total number of software BOM entries.", + "format": "int64", + "type": "integer" + }, + "licenses": { + "description": "License type and their number of occurrences.", + "type": "object" + } + }, + "type": "object" + }, + "SortOrd": { "enum": ["asc", "desc"], "id": "SortOrd", "type": "string" }, + "StackOverflowOverview": { + "description": "Overview for Stack Overflow analysis.", + "id": "StackOverflowOverview", + "properties": { + "method": { + "description": "Name of the protection method used,\nor `None` if stack overflow protection is not enabled.", + "type": "string" + } + }, + "type": "object" + }, + "Status": { + "description": "Status of the AI suggestions computation.", + "enum": ["in-progress", "finished"], + "id": "Status", + "type": "string" + }, + "StatusFilter": { "enum": ["enabled", "disabled"], "id": "StatusFilter", "type": "string" }, + "SubSection": { + "description": "Represents a group of requirements", + "id": "SubSection", + "properties": { + "label": { "description": "Name of the requirement", "type": "string" }, + "requirements": { + "description": "List of sub-requirements or checks associated with this requirement", + "items": { "$ref": "Requirement" }, + "type": "array" + } + }, + "type": "object" + }, + "SuggestionResponse": { + "description": "User response to AI suggestion.", + "enum": ["accepted", "rejected"], + "id": "SuggestionResponse", + "type": "string" + }, + "SymbolsOverview": { + "description": "Overview for Symbol analysis.", + "id": "SymbolsOverview", + "properties": { + "count": { + "description": "Number of analyzed symbols.", + "format": "int64", + "type": "integer" + } + }, + "type": "object" + }, + "TasksOverview": { + "description": "Overview for Task analysis.", + "id": "TasksOverview", + "properties": { + "count": { + "description": "Number of analysed tasks.", + "format": "int64", + "type": "integer" + } + }, + "type": "object" + }, + "TypeFilter": { + "enum": ["sect", "func", "obj", "file", 
"notype"], + "id": "TypeFilter", + "type": "string" + }, + "UpdateObject": { + "description": "The request to update fields on an [`Object`].", + "id": "UpdateObject", + "properties": { + "description": { "description": "Description of the object.", "type": "string" }, + "favorite": { + "description": "Sets if the object is a favorite or not.", + "type": "boolean" + }, + "name": { "description": "Name of the object.", "type": "string" }, + "tags": { + "description": "The tags associated with the object.", + "items": { "type": "string" }, + "type": "array" + } + }, + "type": "object" + }, + "UserId": { + "description": "A wrapper struct `UserId` around a UUID.\n ID in the users table.", + "format": "uuid", + "id": "UserId", + "type": "string" + }, + "UserResult": { + "id": "UserResult", + "properties": { "status": { "$ref": "UserStatus" } }, + "type": "object" + }, + "UserStatus": { + "description": "Represents the status of a requirement overwritten by the user", + "enum": ["passed", "failed"], + "id": "UserStatus", + "type": "string" + }, + "UserUploadedDocument": { + "description": "Description of the user provided file\nused by ai to give its suggestion.", + "id": "UserUploadedDocument", + "properties": { + "filename": { "description": "Name of the user uploaded file.", "type": "string" } + }, + "type": "object" + } + }, + "resources": { + "api": { + "resources": { + "health": { + "methods": { + "list": { + "id": "analyzer-api-routes.api.health.list", + "httpMethod": "GET", + "path": "api/health", + "description": "Returns if the service is in an healthy state.", + "response": { "$ref": "HealthStatus" } + } + } + }, + "objects": { + "methods": { + "create": { + "id": "analyzer-api-routes.api.objects.create", + "httpMethod": "POST", + "path": "api/objects", + "description": "Create new object", + "request": { "$ref": "CreateObject" } + }, + "delete": { + "id": "analyzer-api-routes.api.objects.delete", + "httpMethod": "DELETE", + "path": "api/objects/{id}", + 
"description": "Deletes a object and all related scans.", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Unique identifier of the object to delete" + } + }, + "parameterOrder": ["id"] + }, + "get": { + "id": "analyzer-api-routes.api.objects.get", + "httpMethod": "GET", + "path": "api/objects/{id}", + "description": "Retrieve an object by its ID.", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Object ID" + } + }, + "parameterOrder": ["id"] + }, + "list": { + "id": "analyzer-api-routes.api.objects.list", + "httpMethod": "GET", + "path": "api/objects", + "description": "Retrieve a list of all objects of the current user.", + "parameters": { + "end_timestamp": { + "type": "string", + "required": false, + "location": "query", + "description": "End timestamp for pagination.", + "format": "datetime" + }, + "id": { + "type": "string", + "required": false, + "location": "query", + "description": "Pagination cursor (UUID).", + "format": "uuid" + }, + "limit": { + "type": "integer", + "required": false, + "location": "query", + "description": "Maximum number of items per page.", + "format": "int32" + }, + "start_timestamp": { + "type": "string", + "required": false, + "location": "query", + "description": "Start timestamp for pagination.", + "format": "datetime" + } + } + }, + "update": { + "id": "analyzer-api-routes.api.objects.update", + "httpMethod": "PUT", + "path": "api/objects/{id}", + "description": "Update an object", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Object ID" + } + }, + "parameterOrder": ["id"], + "request": { "$ref": "UpdateObject" } + } + }, + "resources": { + "scans": { + "methods": { + "list": { + "id": "analyzer-api-routes.api.objects.scans.list", + "httpMethod": "GET", + "path": "api/objects/{id}/scans", + "description": "Those scans could be", + "parameters": { + 
"id": { + "type": "string", + "required": true, + "location": "path", + "description": "Object ID" + } + }, + "parameterOrder": ["id"] + } + } + } + } + }, + "scans": { + "methods": { + "create": { + "id": "analyzer-api-routes.api.scans.create", + "httpMethod": "POST", + "path": "api/scans", + "description": "Schedule a new scan.", + "response": { "$ref": "NewScanResponse" } + }, + "delete": { + "id": "analyzer-api-routes.api.scans.delete", + "httpMethod": "DELETE", + "path": "api/scans/{id}", + "description": "Delete a scan.", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID" + } + }, + "parameterOrder": ["id"] + }, + "get": { + "id": "analyzer-api-routes.api.scans.get", + "httpMethod": "GET", + "path": "api/scans/{id}", + "description": "Returns a scan.", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID" + } + }, + "parameterOrder": ["id"], + "response": { "$ref": "Scan" } + }, + "list": { + "id": "analyzer-api-routes.api.scans.list", + "httpMethod": "GET", + "path": "api/scans", + "description": "Retrieve a list of scans." 
+ } + }, + "resources": { + "cancel": { + "methods": { + "create": { + "id": "analyzer-api-routes.api.scans.cancel.create", + "httpMethod": "POST", + "path": "api/scans/{id}/cancel", + "description": "This can be used to cancel an already pending or running scan.", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID" + } + }, + "parameterOrder": ["id"] + } + } + }, + "compliance-check": { + "resources": { + "cyber-resilience-act": { + "methods": { + "list": { + "id": "analyzer-api-routes.api.scans.compliance-check.cyber-resilience-act.list", + "httpMethod": "GET", + "path": "api/scans/{id}/compliance-check/cyber-resilience-act", + "description": "Computes compliance with Cyber Resilience Act", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID", + "format": "uuid" + } + }, + "parameterOrder": ["id"], + "response": { "$ref": "CyberResilienceActReport" } + } + }, + "resources": { + "ai-suggestion": { + "resources": { + "begin": { + "methods": { + "create": { + "id": "analyzer-api-routes.api.scans.compliance-check.cyber-resilience-act.ai-suggestion.begin.create", + "httpMethod": "POST", + "path": "api/scans/{id}/compliance-check/cyber-resilience-act/ai-suggestion/begin", + "description": "Triggers CRA AI suggestion using user-provided documents.", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID", + "format": "uuid" + } + }, + "parameterOrder": ["id"] + } + } + }, + "status": { + "methods": { + "list": { + "id": "analyzer-api-routes.api.scans.compliance-check.cyber-resilience-act.ai-suggestion.status.list", + "httpMethod": "GET", + "path": "api/scans/{id}/compliance-check/cyber-resilience-act/ai-suggestion/status", + "description": "Returns status of the CRA AI suggestion.", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + 
"description": "Scan ID", + "format": "uuid" + } + }, + "parameterOrder": ["id"], + "response": { "$ref": "AiSuggestionStatus" } + } + } + } + } + }, + "overwrite": { + "methods": { + "overwrite_compliance_check_requirement": { + "id": "analyzer-api-routes.api.scans.compliance-check.cyber-resilience-act.overwrite.overwrite_compliance_check_requirement", + "httpMethod": "PUT", + "path": "api/scans/{id}/compliance-check/cyber-resilience-act/overwrite", + "description": "Overwrites compliance check requirement", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID", + "format": "uuid" + } + }, + "parameterOrder": ["id"], + "request": { "$ref": "RequirementOverwrite" } + } + } + }, + "report": { + "methods": { + "list": { + "id": "analyzer-api-routes.api.scans.compliance-check.cyber-resilience-act.report.list", + "httpMethod": "GET", + "path": "api/scans/{id}/compliance-check/cyber-resilience-act/report", + "description": "Downloads Cyber Resilience Act compliance report as PDF", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID", + "format": "uuid" + } + }, + "parameterOrder": ["id"] + } + } + } + } + } + } + }, + "documents": { + "methods": { + "create": { + "id": "analyzer-api-routes.api.scans.documents.create", + "httpMethod": "POST", + "path": "api/scans/{id}/documents", + "description": "Upload a document for a scan.", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID" + } + }, + "parameterOrder": ["id"], + "response": { "$ref": "DocumentUploadResponse" } + }, + "delete": { + "id": "analyzer-api-routes.api.scans.documents.delete", + "httpMethod": "DELETE", + "path": "api/scans/{id}/documents/{file_name}", + "description": "Delete a single document for a scan.", + "parameters": { + "file_name": { + "type": "string", + "required": true, + "location": "path", + 
"description": "Document file name" + }, + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID" + } + }, + "parameterOrder": ["id", "file_name"] + }, + "delete_documents": { + "id": "analyzer-api-routes.api.scans.documents.delete_documents", + "httpMethod": "DELETE", + "path": "api/scans/{id}/documents", + "description": "Delete all documents for a scan.", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID" + } + }, + "parameterOrder": ["id"] + }, + "list": { + "id": "analyzer-api-routes.api.scans.documents.list", + "httpMethod": "GET", + "path": "api/scans/{id}/documents", + "description": "List documents for a scan.", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID" + } + }, + "parameterOrder": ["id"], + "response": { "$ref": "DocumentListResponse" } + } + } + }, + "overview": { + "methods": { + "get": { + "id": "analyzer-api-routes.api.scans.overview.get", + "httpMethod": "GET", + "path": "api/scans/{scan_id}/overview/{analysis_id}", + "description": "Returns an overview of one analysis.", + "parameters": { + "analysis_id": { + "type": "string", + "required": true, + "location": "path", + "description": "Analysis ID" + }, + "scan_id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID" + } + }, + "parameterOrder": ["scan_id", "analysis_id"], + "response": { "$ref": "AnalysisOverview" } + }, + "list": { + "id": "analyzer-api-routes.api.scans.overview.list", + "httpMethod": "GET", + "path": "api/scans/{id}/overview", + "description": "Returns an aggregated overview of all analysis executed for one scan.", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID" + } + }, + "parameterOrder": ["id"], + "response": { "$ref": "ScanOverview" } + } + } + }, + "report": { + "methods": { + "list": { + 
"id": "analyzer-api-routes.api.scans.report.list", + "httpMethod": "GET", + "path": "api/scans/{id}/report", + "description": "Downloads a PDF security report for a scan.", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID", + "format": "uuid" + } + }, + "parameterOrder": ["id"] + } + } + }, + "results": { + "methods": { + "get": { + "id": "analyzer-api-routes.api.scans.results.get", + "httpMethod": "GET", + "path": "api/scans/{scan_id}/results/{analysis_id}", + "description": "Retrieve the results of one specific analysis of a scan.", + "parameters": { + "analysis_id": { + "type": "string", + "required": true, + "location": "path", + "description": "Analysis ID" + }, + "page": { + "type": "integer", + "required": false, + "location": "query", + "description": "Page number (must be > 0). If provided, `per-page` must also be provided.", + "format": "int32" + }, + "per-page": { + "type": "integer", + "required": false, + "location": "query", + "description": "Items per page (must be > 0). If provided, `page` must also be provided.", + "format": "int32" + }, + "query": { + "type": "string", + "required": true, + "location": "query", + "description": "Query parameters depend on the analysis type. Supported shapes: IDF task, other analysis types." 
+ }, + "scan_id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID" + } + }, + "parameterOrder": ["scan_id", "analysis_id"], + "response": { "$ref": "AnalysisResultDTO" } + } + } + }, + "sbom": { + "methods": { + "list": { + "id": "analyzer-api-routes.api.scans.sbom.list", + "httpMethod": "GET", + "path": "api/scans/{id}/sbom", + "description": "Downloads the SBOM (CycloneDX JSON) for a scan.", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID", + "format": "uuid" + } + }, + "parameterOrder": ["id"] + } + } + }, + "score": { + "methods": { + "list": { + "id": "analyzer-api-routes.api.scans.score.list", + "httpMethod": "GET", + "path": "api/scans/{id}/score", + "description": "Returns a security score of all successful finished analyses with their individual scores included.", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID" + } + }, + "parameterOrder": ["id"], + "response": { "$ref": "ScanScore" } + } + } + }, + "status": { + "methods": { + "list": { + "id": "analyzer-api-routes.api.scans.status.list", + "httpMethod": "GET", + "path": "api/scans/{id}/status", + "description": "Returns the status of a scan.", + "parameters": { + "id": { + "type": "string", + "required": true, + "location": "path", + "description": "Scan ID" + } + }, + "parameterOrder": ["id"], + "response": { "$ref": "ScanStatus" } + } + } + }, + "types": { + "methods": { + "list": { + "id": "analyzer-api-routes.api.scans.types.list", + "httpMethod": "GET", + "path": "api/scans/types", + "description": "Returns a list of all available analysis types for each different image." 
+ } + } + } + } + } + } + } + } +} diff --git a/skills/analyzer-scans/SKILL.md b/skills/analyzer-scans/SKILL.md index 060565b..dc1c6f8 100644 --- a/skills/analyzer-scans/SKILL.md +++ b/skills/analyzer-scans/SKILL.md @@ -28,7 +28,10 @@ analyzer api analyzer scans [flags] ### cancel - `create` — This can be used to cancel an already pending or running scan. - - `compliance-check` — Operations on the 'compliance-check' resource + +### compliance-check + + - `cyber-resilience-act` — Operations on the 'cyber-resilience-act' resource ### documents diff --git a/src/api/generate_skills.rs b/src/api/generate_skills.rs index bef13a0..a5bfe0b 100644 --- a/src/api/generate_skills.rs +++ b/src/api/generate_skills.rs @@ -228,17 +228,19 @@ fn render_methods( /// Render sub-resources. Deep nesting is listed as a pointer, not fully expanded. fn render_sub_resources(out: &mut String, resource: &DiscoveryResource) { for (sub_name, sub_resource) in &resource.resources { + writeln!(out).unwrap(); + writeln!(out, "### {sub_name}").unwrap(); + writeln!(out).unwrap(); if sub_resource.methods.is_empty() && !sub_resource.resources.is_empty() { - // Container resource — just list it as a pointer - writeln!( - out, - " - `{sub_name}` — Operations on the '{sub_name}' resource" - ) - .unwrap(); + // Container resource — list nested resources as pointers + for nested_name in sub_resource.resources.keys() { + writeln!( + out, + " - `{nested_name}` — Operations on the '{nested_name}' resource" + ) + .unwrap(); + } } else { - writeln!(out).unwrap(); - writeln!(out, "### {sub_name}").unwrap(); - writeln!(out).unwrap(); render_methods(out, &sub_resource.methods); // Nested sub-resources listed as pointers for nested_name in sub_resource.resources.keys() { From 955d22ca303c0312c157b056f614bb77936d6664 Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Wed, 18 Mar 2026 13:06:06 +0100 Subject: [PATCH 11/38] feat(dommyrock-analyzer-cli): make skill generation service agnostic --- docs/skills.md | 2 +- 
skills/analyzer-health/SKILL.md | 2 +- skills/analyzer-objects/SKILL.md | 2 +- skills/analyzer-scans/SKILL.md | 2 +- skills/{shared => analyzer-shared}/SKILL.md | 8 ++-- src/api/generate_skills.rs | 51 ++++++++++++--------- src/main.rs | 2 +- 7 files changed, 38 insertions(+), 31 deletions(-) rename skills/{shared => analyzer-shared}/SKILL.md (93%) diff --git a/docs/skills.md b/docs/skills.md index 0189f57..94ceabb 100644 --- a/docs/skills.md +++ b/docs/skills.md @@ -7,5 +7,5 @@ | [analyzer-health](../skills/analyzer-health/SKILL.md) | API operations for analyzer-health. | | [analyzer-objects](../skills/analyzer-objects/SKILL.md) | API operations for analyzer-objects. | | [analyzer-scans](../skills/analyzer-scans/SKILL.md) | API operations for analyzer-scans. | -| [shared](../skills/shared/SKILL.md) | Shared patterns for authentication, global flags, and error handling. | +| [analyzer-shared](../skills/analyzer-shared/SKILL.md) | Shared patterns for authentication, global flags, and error handling (analyzer-shared). | diff --git a/skills/analyzer-health/SKILL.md b/skills/analyzer-health/SKILL.md index 86e04a2..d814fb5 100644 --- a/skills/analyzer-health/SKILL.md +++ b/skills/analyzer-health/SKILL.md @@ -12,7 +12,7 @@ metadata: # health (0.5.0) -> **PREREQUISITE:** Read `../shared/SKILL.md` for auth, global flags, and security rules. If missing, run `analyzer generate-skills` to create it. +> **PREREQUISITE:** Read `../analyzer-shared/SKILL.md` for auth, global flags, and security rules. If missing, run `analyzer generate-skills` to create it. ```bash analyzer api analyzer health [flags] diff --git a/skills/analyzer-objects/SKILL.md b/skills/analyzer-objects/SKILL.md index 7a3719a..47494a1 100644 --- a/skills/analyzer-objects/SKILL.md +++ b/skills/analyzer-objects/SKILL.md @@ -12,7 +12,7 @@ metadata: # objects (0.5.0) -> **PREREQUISITE:** Read `../shared/SKILL.md` for auth, global flags, and security rules. If missing, run `analyzer generate-skills` to create it. 
+> **PREREQUISITE:** Read `../analyzer-shared/SKILL.md` for auth, global flags, and security rules. If missing, run `analyzer generate-skills` to create it. ```bash analyzer api analyzer objects [flags] diff --git a/skills/analyzer-scans/SKILL.md b/skills/analyzer-scans/SKILL.md index dc1c6f8..26f04d5 100644 --- a/skills/analyzer-scans/SKILL.md +++ b/skills/analyzer-scans/SKILL.md @@ -12,7 +12,7 @@ metadata: # scans (0.5.0) -> **PREREQUISITE:** Read `../shared/SKILL.md` for auth, global flags, and security rules. If missing, run `analyzer generate-skills` to create it. +> **PREREQUISITE:** Read `../analyzer-shared/SKILL.md` for auth, global flags, and security rules. If missing, run `analyzer generate-skills` to create it. ```bash analyzer api analyzer scans [flags] diff --git a/skills/shared/SKILL.md b/skills/analyzer-shared/SKILL.md similarity index 93% rename from skills/shared/SKILL.md rename to skills/analyzer-shared/SKILL.md index 6dd7799..6f78e72 100644 --- a/skills/shared/SKILL.md +++ b/skills/analyzer-shared/SKILL.md @@ -1,6 +1,6 @@ --- -name: shared -description: "Analyzer CLI: Shared patterns for authentication, global flags, and error handling." +name: analyzer-shared +description: "analyzer CLI: Shared patterns for authentication, global flags, and error handling." metadata: openclaw: category: "security" @@ -60,10 +60,10 @@ Before calling any API method, inspect it: analyzer schema analyzer.api # Inspect a specific method -analyzer schema analyzer.scans.create +analyzer schema analyzer.. # Browse a resource's methods -analyzer schema analyzer.scans.compliance-check +analyzer schema analyzer. ``` Use `analyzer schema` output to build your `--params` and `--json` flags. 
diff --git a/src/api/generate_skills.rs b/src/api/generate_skills.rs index a5bfe0b..835951b 100644 --- a/src/api/generate_skills.rs +++ b/src/api/generate_skills.rs @@ -61,8 +61,13 @@ pub fn generate_for_service( Ok(()) } -/// Generate the shared skill file (service-agnostic: auth, flags, patterns). -pub fn generate_shared(output_dir: &Path) -> Result<()> { +/// Generate the shared skill file for a specific service. +/// +/// Writes `skills/{alias}-shared/SKILL.md` following the same `{alias}-{name}` +/// pattern used by resource skills, so each service (analyzer, isaac, …) gets +/// its own shared skill. +pub fn generate_shared(entry: &ServiceEntry, output_dir: &Path) -> Result<()> { + let alias = entry.aliases[0]; std::fs::create_dir_all(output_dir).with_context(|| { format!( "failed to create output directory: {}", @@ -70,9 +75,9 @@ pub fn generate_shared(output_dir: &Path) -> Result<()> { ) })?; - let shared_dir = output_dir.join("shared"); + let shared_dir = output_dir.join(format!("{alias}-shared")); std::fs::create_dir_all(&shared_dir)?; - let content = generate_shared_skill(); + let content = generate_shared_skill(entry); let shared_path = shared_dir.join("SKILL.md"); std::fs::write(&shared_path, &content) .with_context(|| format!("failed to write {}", shared_path.display()))?; @@ -95,9 +100,8 @@ pub fn write_skills_index(output_dir: &Path) -> Result<()> { let name = entry.file_name().to_string_lossy().to_string(); let skill_file = entry.path().join("SKILL.md"); if skill_file.exists() { - let description = if name == "shared" { - "Shared patterns for authentication, global flags, and error handling." - .to_string() + let description = if name.ends_with("-shared") { + format!("Shared patterns for authentication, global flags, and error handling ({name}).") } else { format!("API operations for {name}.") }; @@ -174,7 +178,7 @@ metadata: # {name} ({version}) -> **PREREQUISITE:** Read `../shared/SKILL.md` for auth, global flags, and security rules. 
If missing, run `analyzer generate-skills` to create it. +> **PREREQUISITE:** Read `../{service_alias}-shared/SKILL.md` for auth, global flags, and security rules. If missing, run `analyzer generate-skills` to create it. ```bash analyzer api {service_alias} {name} [flags] @@ -274,28 +278,31 @@ fn truncate_desc(desc: &str) -> String { // Rendering — shared skill (raw string for readability) // --------------------------------------------------------------------------- -/// Generate the shared SKILL.md using raw strings for readability. +/// Generate the shared SKILL.md for a specific service. /// -/// Covers registered services, authentication, global flags, CLI syntax, -/// schema introspection, security rules, and error handling. -fn generate_shared_skill() -> String { +/// Uses the service alias to produce `{alias}-shared` naming and +/// `{alias}`-specific schema examples, so each service gets its own +/// shared skill when multiple discovery documents are registered. +fn generate_shared_skill(entry: &ServiceEntry) -> String { + let alias = entry.aliases[0]; + // Build the services table dynamically let mut services_table = String::new(); - for entry in SERVICES { + for svc in SERVICES { writeln!( services_table, "| `{}` | {} | {} |", - entry.aliases.join(", "), - entry.api_name, - entry.description + svc.aliases.join(", "), + svc.api_name, + svc.description ) .unwrap(); } format!( r#"--- -name: shared -description: "Analyzer CLI: Shared patterns for authentication, global flags, and error handling." +name: {alias}-shared +description: "{alias} CLI: Shared patterns for authentication, global flags, and error handling." 
metadata: openclaw: category: "security" @@ -303,7 +310,7 @@ metadata: bins: ["analyzer"] --- -# analyzer — Shared Reference +# {alias} — Shared Reference ## Registered Services @@ -351,13 +358,13 @@ Before calling any API method, inspect it: ```bash # Browse all resources for a service -analyzer schema analyzer.api +analyzer schema {alias}.api # Inspect a specific method -analyzer schema analyzer.scans.create +analyzer schema {alias}.. # Browse a resource's methods -analyzer schema analyzer.scans.compliance-check +analyzer schema {alias}. ``` Use `analyzer schema` output to build your `--params` and `--json` flags. diff --git a/src/main.rs b/src/main.rs index d39b2b3..aa870f3 100644 --- a/src/main.rs +++ b/src/main.rs @@ -624,8 +624,8 @@ async fn run(cli: Cli) -> Result<()> { discovery::load_for_service(entry).await? }; api::generate_skills::generate_for_service(&doc, entry, skills_dir)?; + api::generate_skills::generate_shared(entry, skills_dir)?; } - api::generate_skills::generate_shared(skills_dir)?; api::generate_skills::write_skills_index(skills_dir)?; println!("\nDone."); Ok(()) From 4021378e62ac01f350db0ca68f81328ce6c987d2 Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Wed, 18 Mar 2026 15:50:42 +0100 Subject: [PATCH 12/38] feat(dommyrock-analyzer-cli): support table and csv --format and generic response formatter --- ...on-flow.md => Schema_introspection_flow.md | 0 skills/analyzer-shared/SKILL.md | 2 +- src/api/executor.rs | 10 +- src/api/generate_skills.rs | 2 +- src/commands/object.rs | 4 +- src/commands/scan.rs | 367 +------------- src/discovery.rs | 8 - src/output.rs | 452 ++++++++++++++++-- 8 files changed, 456 insertions(+), 389 deletions(-) rename Schema-introspection-flow.md => Schema_introspection_flow.md (100%) diff --git a/Schema-introspection-flow.md b/Schema_introspection_flow.md similarity index 100% rename from Schema-introspection-flow.md rename to Schema_introspection_flow.md diff --git a/skills/analyzer-shared/SKILL.md 
b/skills/analyzer-shared/SKILL.md index 6f78e72..66027e5 100644 --- a/skills/analyzer-shared/SKILL.md +++ b/skills/analyzer-shared/SKILL.md @@ -36,7 +36,7 @@ export ANALYZER_API_KEY="your-api-key" | `--page-all` | Auto-paginate results as NDJSON | | `--dry-run` | Validate and print request without executing | | `--discovery ` | Override discovery document (dev/testing) | -| `--format ` | Output format: `human` (default), `json` | +| `--format ` | Output format: `human` (default), `json`, `table`, `csv` | ## CLI Syntax diff --git a/src/api/executor.rs b/src/api/executor.rs index 15a555f..2c78bd3 100644 --- a/src/api/executor.rs +++ b/src/api/executor.rs @@ -56,7 +56,7 @@ pub async fn execute_method( fields: Option<&str>, pagination: &PaginationConfig, dry_run: bool, - _format: Format, + format: Format, ) -> Result<()> { let params: serde_json::Map = match params_json { Some(s) => serde_json::from_str(s).context("invalid --params JSON")?, @@ -125,10 +125,12 @@ pub async fn execute_method( }; if pagination.page_all { - // NDJSON: one compact JSON line per page - println!("{}", serde_json::to_string(&filtered)?); + let out = + crate::output::format_value_paginated(&filtered, &format, pages_fetched == 1); + print!("{out}"); } else { - println!("{}", serde_json::to_string_pretty(&filtered)?); + let out = crate::output::format_value(&filtered, &format); + println!("{out}"); break; } diff --git a/src/api/generate_skills.rs b/src/api/generate_skills.rs index 835951b..4a2559e 100644 --- a/src/api/generate_skills.rs +++ b/src/api/generate_skills.rs @@ -337,7 +337,7 @@ export ANALYZER_API_KEY="your-api-key" | `--page-all` | Auto-paginate results as NDJSON | | `--dry-run` | Validate and print request without executing | | `--discovery ` | Override discovery document (dev/testing) | -| `--format ` | Output format: `human` (default), `json` | +| `--format ` | Output format: `human` (default), `json`, `table`, `csv` | ## CLI Syntax diff --git a/src/commands/object.rs 
b/src/commands/object.rs index d9d4a64..246e703 100644 --- a/src/commands/object.rs +++ b/src/commands/object.rs @@ -20,7 +20,7 @@ pub async fn run_list(client: &AnalyzerClient, format: Format) -> Result<()> { serde_json::to_string_pretty(&serde_json::to_value(&objects)?)? ); } - Format::Human | Format::Table => { + Format::Human | Format::Table | Format::Csv => { if objects.is_empty() { output::status( "Objects", @@ -114,7 +114,7 @@ pub async fn run_new( serde_json::to_string_pretty(&serde_json::to_value(&object)?)? ); } - Format::Human | Format::Table => { + Format::Human | Format::Table | Format::Csv => { output::success(&format!("Created object '{}' ({})", object.name, object.id)); } } diff --git a/src/commands/scan.rs b/src/commands/scan.rs index d93dd7b..1c9fe01 100644 --- a/src/commands/scan.rs +++ b/src/commands/scan.rs @@ -10,11 +10,10 @@ use uuid::Uuid; use crate::client::AnalyzerClient; use crate::client::models::{ - AnalysisStatus, AnalysisStatusEntry, AnalysisType, CapabilityFinding, ComplianceReport, - ComplianceType, CryptoFinding, CveFinding, HardeningFinding, IdfSymbolFinding, IdfTaskFinding, - KernelFinding, MalwareFinding, PasswordFinding, ResultsQuery, SbomComponent, ScanTypeRequest, + AnalysisStatus, AnalysisStatusEntry, AnalysisType, ComplianceReport, ComplianceType, + ResultsQuery, ScanTypeRequest, }; -use crate::output::{self, Format, format_score, format_status}; +use crate::output::{self, Format, format_score, format_status, format_value}; /// Resolve a scan ID from either an explicit --scan or an --object flag. /// When --object is used, fetches the object and returns its last scan ID. @@ -190,7 +189,7 @@ pub async fn run_score(client: &AnalyzerClient, scan_id: Uuid, format: Format) - serde_json::to_string_pretty(&serde_json::to_value(&score)?)? 
); } - Format::Human | Format::Table => { + Format::Human | Format::Table | Format::Csv => { eprintln!( "\n {} {}", style("Overall Score:").bold(), @@ -232,7 +231,7 @@ pub async fn run_types(client: &AnalyzerClient, format: Format) -> Result<()> { serde_json::to_string_pretty(&serde_json::to_value(&types)?)? ); } - Format::Human | Format::Table => { + Format::Human | Format::Table | Format::Csv => { for st in &types { eprintln!("\n {}", style(&st.image_type).bold().underlined()); for a in &st.analyses { @@ -282,7 +281,7 @@ fn print_status( serde_json::to_string_pretty(&serde_json::Value::Object(map))? ); } - Format::Human | Format::Table => { + Format::Human | Format::Table | Format::Csv => { eprintln!( "\n {} {} ({})", style("Scan").bold(), @@ -397,7 +396,7 @@ pub async fn run_overview(client: &AnalyzerClient, scan_id: Uuid, format: Format Format::Json => { println!("{}", serde_json::to_string_pretty(&overview)?); } - Format::Human | Format::Table => { + Format::Human | Format::Table | Format::Csv => { eprintln!("\n {} {}\n", style("Scan Overview").bold(), scan_id); if let Some(cve) = &overview.cve { @@ -544,349 +543,33 @@ pub async fn run_results( Format::Json => { println!("{}", serde_json::to_string_pretty(&results)?); } - Format::Human | Format::Table => { - let all_values: Vec<&serde_json::Value> = results.findings.iter().collect(); - - if all_values.is_empty() { - eprintln!("\n No findings.\n"); + Format::Human | Format::Table | Format::Csv => { + if results.findings.is_empty() { + if matches!(format, Format::Csv) { + // CSV: nothing to output + } else { + eprintln!("\n No findings.\n"); + } return Ok(()); } - match analysis_type { - AnalysisType::Cve => render_cve_table(&all_values)?, - AnalysisType::PasswordHash => render_password_table(&all_values)?, - AnalysisType::Malware => render_malware_table(&all_values)?, - AnalysisType::Hardening => render_hardening_table(&all_values)?, - AnalysisType::Capabilities => render_capabilities_table(&all_values)?, - 
AnalysisType::Crypto => render_crypto_table(&all_values)?, - AnalysisType::SoftwareBom => render_sbom_table(&all_values)?, - AnalysisType::Kernel => render_kernel_table(&all_values)?, - AnalysisType::Symbols => render_symbols_table(&all_values)?, - AnalysisType::Tasks => render_tasks_table(&all_values)?, - AnalysisType::Info => render_info(&all_values)?, - AnalysisType::StackOverflow => render_info(&all_values)?, - } - - let total_pages = results.total_findings.div_ceil(per_page as u64); - eprintln!( - "\n Page {}/{} ({} total) — use --page N to navigate\n", - page, total_pages, results.total_findings, - ); - } - } - Ok(()) -} - -fn render_cve_table(values: &[&serde_json::Value]) -> Result<()> { - eprintln!(); - eprintln!( - " {:<8} {:<15} {:<5} {:<14} {:<20} {}", - style("Severity").underlined(), - style("CVE ID").underlined(), - style("Score").underlined(), - style("Vendor").underlined(), - style("Product").underlined(), - style("Summary").underlined(), - ); - for val in values { - if let Ok(f) = serde_json::from_value::((*val).clone()) { - let score_str = f - .cvss - .as_ref() - .and_then(|c| c.v3.as_ref().or(c.v2.as_ref())) - .and_then(|d| d.base_score) - .map(|s| format!("{s:.1}")) - .unwrap_or_default(); - let sev = format_severity(f.severity.as_deref().unwrap_or("unknown"), 8); - let product = f - .products - .first() - .and_then(|p| p.product.as_deref()) - .unwrap_or("-"); - let summary = f.summary.as_deref().unwrap_or(""); - let summary_trunc = if summary.len() > 40 { - format!("{}...", &summary[..37]) - } else { - summary.to_string() - }; - eprintln!( - " {} {:<15} {:<5} {:<14} {:<20} {}", - sev, - f.cveid.as_deref().unwrap_or("-"), - score_str, - truncate_str(f.vendor.as_deref().unwrap_or("-"), 14), - truncate_str(product, 20), - summary_trunc, - ); - } - } - Ok(()) -} - -fn render_password_table(values: &[&serde_json::Value]) -> Result<()> { - eprintln!(); - eprintln!( - " {:<8} {:<20} {}", - style("Severity").underlined(), - 
style("Username").underlined(), - style("Password").underlined(), - ); - for val in values { - if let Ok(f) = serde_json::from_value::((*val).clone()) { - let sev = format_severity(f.severity.as_deref().unwrap_or("unknown"), 8); - eprintln!( - " {} {:<20} {}", - sev, - f.username.as_deref().unwrap_or("-"), - f.password.as_deref().unwrap_or("-"), - ); - } - } - Ok(()) -} - -fn render_malware_table(values: &[&serde_json::Value]) -> Result<()> { - eprintln!(); - eprintln!( - " {:<30} {:<40} {}", - style("Filename").underlined(), - style("Description").underlined(), - style("Engine").underlined(), - ); - for val in values { - if let Ok(f) = serde_json::from_value::((*val).clone()) { - eprintln!( - " {:<30} {:<40} {}", - truncate_str(f.filename.as_deref().unwrap_or("-"), 30), - truncate_str(f.description.as_deref().unwrap_or("-"), 40), - f.detection_engine.as_deref().unwrap_or("-"), - ); - } - } - Ok(()) -} - -fn render_hardening_table(values: &[&serde_json::Value]) -> Result<()> { - eprintln!(); - eprintln!( - " {:<8} {:<30} {:<6} {:<3} {:<7} {:<7} {}", - style("Severity").underlined(), - style("Filename").underlined(), - style("Canary").underlined(), - style("NX").underlined(), - style("PIE").underlined(), - style("RELRO").underlined(), - style("Fortify").underlined(), - ); - for val in values { - if let Ok(f) = serde_json::from_value::((*val).clone()) { - let sev = format_severity(f.severity.as_deref().unwrap_or("unknown"), 8); - eprintln!( - " {} {:<30} {} {} {:<7} {:<7} {}", - sev, - truncate_str(f.filename.as_deref().unwrap_or("-"), 30), - format_bool(f.canary.unwrap_or(false), 6), - format_bool(f.nx.unwrap_or(false), 3), - f.pie.as_deref().unwrap_or("-"), - f.relro.as_deref().unwrap_or("-"), - format_bool(f.fortify.unwrap_or(false), 7), - ); - } - } - Ok(()) -} - -fn render_capabilities_table(values: &[&serde_json::Value]) -> Result<()> { - eprintln!(); - eprintln!( - " {:<30} {:<8} {:<9} {}", - style("Filename").underlined(), - style("Severity").underlined(), - 
style("Behaviors").underlined(), - style("Syscalls").underlined(), - ); - for val in values { - if let Ok(f) = serde_json::from_value::((*val).clone()) { - let sev = format_severity(f.level.as_deref().unwrap_or("unknown"), 8); - eprintln!( - " {:<30} {} {:<9} {}", - truncate_str(f.filename.as_deref().unwrap_or("-"), 30), - sev, - f.behaviors.len(), - f.syscalls.len(), - ); - } - } - Ok(()) -} - -/// Format a severity string with color and fixed-width padding. -fn format_severity(severity: &str, width: usize) -> String { - let padded = format!("{: style(padded).red().bold().to_string(), - "high" => style(padded).red().to_string(), - "medium" => style(padded).yellow().to_string(), - "low" => style(padded).green().to_string(), - _ => style(padded).dim().to_string(), - } -} - -/// Truncate a string to max chars, adding "..." if needed. -fn truncate_str(s: &str, max: usize) -> String { - if s.len() > max { - format!("{}...", &s[..max.saturating_sub(3)]) - } else { - format!("{: String { - if val { - style(format!("{: Result<()> { - eprintln!(); - eprintln!( - " {:<14} {:<20} {:<20} {:<8} {}", - style("Type").underlined(), - style("Filename").underlined(), - style("Path").underlined(), - style("Key Size").underlined(), - style("Aux").underlined(), - ); - for val in values { - if let Ok(f) = serde_json::from_value::((*val).clone()) { - let aux = if f.aux.is_empty() { - "-".to_string() + let findings_value = serde_json::to_value(&results.findings)?; + let formatted = format_value(&findings_value, &format); + if matches!(format, Format::Csv) { + print!("{formatted}"); } else { - f.aux.join(", ") - }; - eprintln!( - " {:<14} {:<20} {:<20} {:<8} {}", - truncate_str(f.crypto_type.as_deref().unwrap_or("-"), 14), - truncate_str(f.filename.as_deref().unwrap_or("-"), 20), - truncate_str(f.parent.as_deref().unwrap_or("-"), 20), - f.pubsz.map(|s| s.to_string()).as_deref().unwrap_or("-"), - truncate_str(&aux, 30), - ); - } - } - Ok(()) -} - -fn render_sbom_table(values: 
&[&serde_json::Value]) -> Result<()> { - eprintln!(); - eprintln!( - " {:<30} {:<14} {:<12} {}", - style("Name").underlined(), - style("Version").underlined(), - style("Type").underlined(), - style("Licenses").underlined(), - ); - for val in values { - if let Ok(f) = serde_json::from_value::((*val).clone()) { - let licenses = f - .licenses - .iter() - .filter_map(|l| { - l.get("license") - .and_then(|lic| lic.get("id").or_else(|| lic.get("name"))) - .and_then(|v| v.as_str()) - .map(|s| s.to_string()) - }) - .collect::>() - .join(", "); - eprintln!( - " {:<30} {:<14} {:<12} {}", - truncate_str(f.name.as_deref().unwrap_or("-"), 30), - truncate_str(f.version.as_deref().unwrap_or("-"), 14), - f.component_type.as_deref().unwrap_or("-"), - if licenses.is_empty() { "-" } else { &licenses }, - ); - } - } - Ok(()) -} - -fn render_kernel_table(values: &[&serde_json::Value]) -> Result<()> { - for val in values { - if let Ok(f) = serde_json::from_value::((*val).clone()) { - if let Some(file) = &f.file { - eprintln!("\n {} {}", style("Kernel Config:").bold(), file); - } - if let Some(score) = f.score { - eprintln!(" Score: {}", score); - } - eprintln!(); - eprintln!( - " {:<40} {}", - style("Feature").underlined(), - style("Status").underlined(), - ); - for feat in &f.features { - eprintln!(" {:<40} {}", feat.name, format_bool(feat.enabled, 8),); + eprint!("\n{formatted}"); + let total_pages = results.total_findings.div_ceil(per_page as u64); + eprintln!( + "\n Page {}/{} ({} total) — use --page N to navigate\n", + page, total_pages, results.total_findings, + ); } } } Ok(()) } -fn render_symbols_table(values: &[&serde_json::Value]) -> Result<()> { - eprintln!(); - eprintln!( - " {:<40} {:<12} {}", - style("Name").underlined(), - style("Type").underlined(), - style("Bind").underlined(), - ); - for val in values { - if let Ok(f) = serde_json::from_value::((*val).clone()) { - eprintln!( - " {:<40} {:<12} {}", - truncate_str(f.symbol_name.as_deref().unwrap_or("-"), 40), - 
f.symbol_type.as_deref().unwrap_or("-"), - f.symbol_bind.as_deref().unwrap_or("-"), - ); - } - } - Ok(()) -} - -fn render_tasks_table(values: &[&serde_json::Value]) -> Result<()> { - eprintln!(); - eprintln!( - " {:<30} {}", - style("Name").underlined(), - style("Function").underlined(), - ); - for val in values { - if let Ok(f) = serde_json::from_value::((*val).clone()) { - eprintln!( - " {:<30} {}", - truncate_str(f.task_name.as_deref().unwrap_or("-"), 30), - f.task_fn.as_deref().unwrap_or("-"), - ); - } - } - Ok(()) -} - -fn render_info(values: &[&serde_json::Value]) -> Result<()> { - for val in values { - eprintln!("\n{}", serde_json::to_string_pretty(val)?); - } - Ok(()) -} // =========================================================================== // Compliance @@ -905,7 +588,7 @@ pub async fn run_compliance( Format::Json => { println!("{}", serde_json::to_string_pretty(&report)?); } - Format::Human | Format::Table => { + Format::Human | Format::Table | Format::Csv => { render_compliance_human(&report, ct); } } diff --git a/src/discovery.rs b/src/discovery.rs index 8ecc310..f666072 100644 --- a/src/discovery.rs +++ b/src/discovery.rs @@ -12,10 +12,6 @@ use serde::{Deserialize, Serialize}; use crate::services::ServiceEntry; -// --------------------------------------------------------------------------- -// Serde models -// --------------------------------------------------------------------------- - /// Top-level Discovery Document. #[derive(Debug, Clone, Deserialize)] #[serde(rename_all = "camelCase")] @@ -82,10 +78,6 @@ pub struct SchemaRef { pub ref_name: String, } -// --------------------------------------------------------------------------- -// Loading -// --------------------------------------------------------------------------- - /// Where the discovery document comes from. pub enum DiscoverySource { File(PathBuf), diff --git a/src/output.rs b/src/output.rs index 83316bc..875a086 100644 --- a/src/output.rs +++ b/src/output.rs @@ -1,7 +1,11 @@ -//! 
Output formatting: human (colored), JSON, and table modes. +//! Output formatting: human (colored), JSON, table, and CSV modes. +//! +//! Provides a generic `format_value` function that auto-detects arrays, +//! flattens nested objects into dot-notation columns, and renders tables +//! or CSV from arbitrary JSON responses. No per-analysis-type renderers +//! needed — the formatter handles any shape from the discovery API. -use console::style; -use owo_colors::OwoColorize; +use serde_json::Value; /// Output format selected by the user. #[derive(Debug, Clone, Copy, Default, clap::ValueEnum)] @@ -13,46 +17,432 @@ pub enum Format { Json, /// ASCII table output. Table, + /// CSV output for export. + Csv, } -/// Print a success message to stderr. -pub fn success(msg: &str) { - eprintln!(" {} {msg}", style("OK").green().bold()); +mod status { + use console::style; + use owo_colors::OwoColorize; + + /// Print a success message to stderr. + pub fn success(msg: &str) { + eprintln!(" {} {msg}", style("OK").green().bold()); + } + + /// Print a warning message to stderr. + pub fn warning(msg: &str) { + eprintln!(" {} {msg}", style("WARN").yellow().bold()); + } + + /// Print an error message to stderr. + pub fn error(msg: &str) { + eprintln!(" {} {msg}", style("ERR").red().bold()); + } + + /// Print a labelled status line to stderr. + pub fn status(label: &str, msg: &str) { + eprintln!("{} {msg}", style(format!("{label:>12}")).cyan().bold()); + } + + /// Format a score with colour coding. + pub fn format_score(score: Option) -> String { + match score { + Some(s) if s >= 80 => format!("{}", s.to_string().green()), + Some(s) if s >= 50 => format!("{}", s.to_string().yellow()), + Some(s) => format!("{}", s.to_string().red()), + None => style("--").dim().to_string(), + } + } + + /// Format an analysis status string with colour. 
+ pub fn format_status(status: &str) -> String { + match status { + "success" => style(status).green().to_string(), + "pending" => style(status).dim().to_string(), + "in-progress" => style(status).cyan().to_string(), + "canceled" => style(status).yellow().to_string(), + "error" => style(status).red().to_string(), + other => other.to_string(), + } + } } -/// Print a warning message to stderr. -pub fn warning(msg: &str) { - eprintln!(" {} {msg}", style("WARN").yellow().bold()); +pub use status::*; + +/// Format a JSON value according to the selected output format. +/// +/// For `Table` and `Csv`: auto-detects arrays (including nested under a wrapper +/// key), flattens objects into dot-notation columns, and renders. +/// For `Json`: pretty-prints. +/// For `Human`: falls back to pretty JSON (callers may override with custom +/// rendering before calling this). +pub fn format_value(value: &Value, format: &Format) -> String { + match format { + Format::Json | Format::Human => serde_json::to_string_pretty(value).unwrap_or_default(), + Format::Table => table::format(value), + Format::Csv => csv::format(value), + } } -/// Print an error message to stderr. -pub fn error(msg: &str) { - eprintln!(" {} {msg}", style("ERR").red().bold()); +/// Format a paginated response. For NDJSON (Json), emits compact one-line JSON. +/// For Table/Csv, `is_first_page` controls whether headers are emitted. +pub fn format_value_paginated(value: &Value, format: &Format, is_first_page: bool) -> String { + match format { + Format::Json | Format::Human => { + // NDJSON: compact, one object per line + serde_json::to_string(value).unwrap_or_default() + } + Format::Table => table::format_page(value, is_first_page), + Format::Csv => csv::format_page(value, is_first_page), + } } -/// Print a labelled status line to stderr. 
-pub fn status(label: &str, msg: &str) { - eprintln!("{} {msg}", style(format!("{label:>12}")).cyan().bold()); +mod items { + use serde_json::Value; + use std::collections::BTreeMap; + + /// Extract items from a JSON value. If it's an array, use directly. + /// If it's an object with an array field (skipping metadata keys), extract that. + /// Otherwise wrap the single value as a one-element array. + pub fn extract(value: &Value) -> Vec<&Value> { + match value { + Value::Array(arr) => arr.iter().collect(), + Value::Object(map) => { + let skip_keys = [ + "nextPageToken", + "kind", + "total-findings", + "total_findings", + "_links", + "_embedded", + ]; + for (key, val) in map { + if skip_keys.contains(&key.as_str()) || key.starts_with('_') { + continue; + } + if let Value::Array(arr) = val { + if !arr.is_empty() { + return arr.iter().collect(); + } + } + } + vec![value] + } + _ => vec![value], + } + } + + /// Collect column names preserving insertion order from the first item, + /// then adding any extra keys from subsequent items. + pub fn collect_columns(items: &[&Value]) -> Vec { + let mut seen = std::collections::HashSet::new(); + let mut columns = Vec::new(); + + for item in items { + let flat = flatten(item); + for key in flat.keys() { + if seen.insert(key.clone()) { + columns.push(key.clone()); + } + } + } + columns + } + + /// Flatten a JSON value into a string map with dot-notation keys. 
+ /// `{"user": {"name": "Alice"}, "id": 1}` becomes `{"user.name": "Alice", "id": "1"}` + pub fn flatten(value: &Value) -> BTreeMap { + let mut map = BTreeMap::new(); + flatten_recursive(value, String::new(), &mut map); + map + } + + fn flatten_recursive(value: &Value, prefix: String, map: &mut BTreeMap) { + match value { + Value::Object(obj) => { + for (k, v) in obj { + let key = if prefix.is_empty() { + k.clone() + } else { + format!("{prefix}.{k}") + }; + flatten_recursive(v, key, map); + } + } + Value::Array(arr) => { + let cells: Vec = arr.iter().map(value_to_cell).collect(); + map.insert(prefix, cells.join(", ")); + } + _ => { + map.insert(prefix, value_to_cell(value)); + } + } + } + + /// Convert a JSON value to a display string. + pub fn value_to_cell(value: &Value) -> String { + match value { + Value::Null => String::new(), + Value::String(s) => s.clone(), + Value::Bool(b) => b.to_string(), + Value::Number(n) => n.to_string(), + Value::Array(arr) => { + let cells: Vec = arr.iter().map(value_to_cell).collect(); + cells.join(", ") + } + Value::Object(_) => serde_json::to_string(value).unwrap_or_default(), + } + } } -/// Format a score with colour coding. 
-pub fn format_score(score: Option) -> String { - match score { - Some(s) if s >= 80 => format!("{}", s.to_string().green()), - Some(s) if s >= 50 => format!("{}", s.to_string().yellow()), - Some(s) => format!("{}", s.to_string().red()), - None => style("--").dim().to_string(), +mod table { + use super::items; + use serde_json::Value; + use std::fmt::Write as FmtWrite; + + const MAX_COL_WIDTH: usize = 50; + + pub fn format(value: &Value) -> String { + format_page(value, true) + } + + pub fn format_page(value: &Value, include_header: bool) -> String { + let extracted = items::extract(value); + + if extracted.is_empty() { + if include_header { + return "(no data)\n".to_string(); + } + return String::new(); + } + + let columns = items::collect_columns(&extracted); + let rows: Vec<_> = extracted.iter().map(|v| items::flatten(v)).collect(); + + let widths: Vec = columns + .iter() + .map(|col| { + let header_w = col.chars().count(); + let data_w = rows + .iter() + .map(|r| r.get(col).map(|v| v.chars().count()).unwrap_or(0)) + .max() + .unwrap_or(0); + header_w.max(data_w).min(MAX_COL_WIDTH) + }) + .collect(); + + let mut out = String::new(); + + if include_header { + let header: Vec = columns + .iter() + .zip(&widths) + .map(|(col, &w)| truncate_to_width(col, w)) + .collect(); + writeln!(out, " {}", header.join(" ")).unwrap(); + + let sep: Vec = widths.iter().map(|&w| "─".repeat(w)).collect(); + writeln!(out, " {}", sep.join(" ")).unwrap(); + } + + for row in &rows { + let cells: Vec = columns + .iter() + .zip(&widths) + .map(|(col, &w)| { + let val = row.get(col).map(|s| s.as_str()).unwrap_or(""); + truncate_to_width(val, w) + }) + .collect(); + writeln!(out, " {}", cells.join(" ")).unwrap(); + } + + out + } + + fn truncate_to_width(s: &str, width: usize) -> String { + let char_count = s.chars().count(); + if char_count <= width { + format!("{: 1 { + let truncated: String = s.chars().take(width - 1).collect(); + format!("{truncated}…") + } else { + "…".to_string() + } } 
} -/// Format an analysis status string with colour. -pub fn format_status(status: &str) -> String { - match status { - "success" => style(status).green().to_string(), - "pending" => style(status).dim().to_string(), - "in-progress" => style(status).cyan().to_string(), - "canceled" => style(status).yellow().to_string(), - "error" => style(status).red().to_string(), - other => other.to_string(), +mod csv { + use super::items; + use serde_json::Value; + use std::fmt::Write as FmtWrite; + + pub fn format(value: &Value) -> String { + format_page(value, true) + } + + pub fn format_page(value: &Value, include_header: bool) -> String { + let extracted = items::extract(value); + + if extracted.is_empty() { + return String::new(); + } + + let columns = items::collect_columns(&extracted); + let rows: Vec<_> = extracted.iter().map(|v| items::flatten(v)).collect(); + + let mut out = String::new(); + + if include_header { + let header: Vec = columns.iter().map(|c| escape(c)).collect(); + writeln!(out, "{}", header.join(",")).unwrap(); + } + + for row in &rows { + let cells: Vec = columns + .iter() + .map(|col| { + let val = row.get(col).map(|s| s.as_str()).unwrap_or(""); + escape(val) + }) + .collect(); + writeln!(out, "{}", cells.join(",")).unwrap(); + } + + out + } + + fn escape(s: &str) -> String { + if s.contains(',') || s.contains('"') || s.contains('\n') { + format!("\"{}\"", s.replace('"', "\"\"")) + } else { + s.to_string() + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use serde_json::json; + + #[test] + fn format_value_json() { + let val = json!({"id": 1, "name": "test"}); + let out = format_value(&val, &Format::Json); + assert!(out.contains("\"id\": 1")); + assert!(out.contains("\"name\": \"test\"")); + } + + #[test] + fn format_value_table_array() { + let val = json!([ + {"id": "1", "name": "alice"}, + {"id": "2", "name": "bob"} + ]); + let out = format_value(&val, &Format::Table); + assert!(out.contains("id")); + assert!(out.contains("name")); + 
assert!(out.contains("alice")); + assert!(out.contains("bob")); + assert!(out.contains("───")); + } + + #[test] + fn format_value_table_nested_flattening() { + let val = json!([{"user": {"name": "Alice"}, "score": 42}]); + let out = format_value(&val, &Format::Table); + assert!(out.contains("user.name")); + assert!(out.contains("Alice")); + assert!(out.contains("42")); + } + + #[test] + fn format_value_table_extracts_from_wrapper() { + let val = json!({ + "total-findings": 100, + "findings": [{"id": "a"}, {"id": "b"}], + "_links": {"next": {"href": "http://..."}} + }); + let out = format_value(&val, &Format::Table); + assert!(out.contains("id")); + assert!(out.contains("a")); + assert!(out.contains("b")); + assert!(!out.contains("total-findings")); + assert!(!out.contains("_links")); + } + + #[test] + fn format_value_table_single_object() { + let val = json!({"status": "success", "score": 85}); + let out = format_value(&val, &Format::Table); + assert!(out.contains("status")); + assert!(out.contains("success")); + } + + #[test] + fn format_value_csv_array() { + let val = json!([ + {"id": "1", "name": "alice"}, + {"id": "2", "name": "bob"} + ]); + let out = format_value(&val, &Format::Csv); + let lines: Vec<&str> = out.lines().collect(); + assert_eq!(lines[0], "id,name"); + assert_eq!(lines[1], "1,alice"); + assert_eq!(lines[2], "2,bob"); + } + + #[test] + fn csv_escape_handles_commas_and_quotes() { + assert_eq!(csv::format(&json!([{"a": "hello,world"}])), "a\n\"hello,world\"\n"); + assert_eq!( + csv::format(&json!([{"a": "say \"hi\""}])), + "a\n\"say \"\"hi\"\"\"\n" + ); + } + + #[test] + fn format_value_csv_nested() { + let val = json!([{"user": {"name": "Alice"}, "score": 42}]); + let out = format_value(&val, &Format::Csv); + assert!(out.contains("user.name")); + assert!(out.contains("Alice")); + } + + #[test] + fn paginated_table_skips_header_on_subsequent_pages() { + let val = json!([{"id": "1"}]); + let first = format_value_paginated(&val, &Format::Table, 
true); + let second = format_value_paginated(&val, &Format::Table, false); + assert!(first.contains("id")); + assert!(first.contains("──")); + assert!(!second.contains("──")); + } + + #[test] + fn paginated_csv_skips_header_on_subsequent_pages() { + let val = json!([{"id": "1", "name": "a"}]); + let first = format_value_paginated(&val, &Format::Csv, true); + let second = format_value_paginated(&val, &Format::Csv, false); + assert!(first.starts_with("id,name")); + assert!(!second.contains("id,name")); + } + + #[test] + fn paginated_json_is_compact() { + let val = json!({"id": "1"}); + let out = format_value_paginated(&val, &Format::Json, true); + assert!(!out.contains('\n')); + } + + #[test] + fn empty_array_produces_no_data() { + let val = json!([]); + assert!(format_value(&val, &Format::Table).contains("(no data)")); + assert!(format_value(&val, &Format::Csv).is_empty()); } } From 48a162b4a8564d89e45025d1380ed19b36f9ea76 Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Wed, 18 Mar 2026 15:52:45 +0100 Subject: [PATCH 13/38] feat(dommyrock-analyzer-cli): add test-cases crate and improve tests for --format --- Cargo.toml | 1 + src/api/executor.rs | 3 +- src/api/generate_skills.rs | 4 +- src/commands/scan.rs | 1 - src/output.rs | 187 ++++++++++++++++++++++++------------- 5 files changed, 128 insertions(+), 68 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 1a838c8..a6d2474 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -41,6 +41,7 @@ clap_complete = "4" assert_cmd = "2" predicates = "3" tempfile = "3" +test-case = "3" wiremock = "0.6" [profile.release] diff --git a/src/api/executor.rs b/src/api/executor.rs index 2c78bd3..836a181 100644 --- a/src/api/executor.rs +++ b/src/api/executor.rs @@ -125,8 +125,7 @@ pub async fn execute_method( }; if pagination.page_all { - let out = - crate::output::format_value_paginated(&filtered, &format, pages_fetched == 1); + let out = crate::output::format_value_paginated(&filtered, &format, pages_fetched == 1); print!("{out}"); 
} else { let out = crate::output::format_value(&filtered, &format); diff --git a/src/api/generate_skills.rs b/src/api/generate_skills.rs index 4a2559e..1a19968 100644 --- a/src/api/generate_skills.rs +++ b/src/api/generate_skills.rs @@ -101,7 +101,9 @@ pub fn write_skills_index(output_dir: &Path) -> Result<()> { let skill_file = entry.path().join("SKILL.md"); if skill_file.exists() { let description = if name.ends_with("-shared") { - format!("Shared patterns for authentication, global flags, and error handling ({name}).") + format!( + "Shared patterns for authentication, global flags, and error handling ({name})." + ) } else { format!("API operations for {name}.") }; diff --git a/src/commands/scan.rs b/src/commands/scan.rs index 1c9fe01..d3ea075 100644 --- a/src/commands/scan.rs +++ b/src/commands/scan.rs @@ -570,7 +570,6 @@ pub async fn run_results( Ok(()) } - // =========================================================================== // Compliance // =========================================================================== diff --git a/src/output.rs b/src/output.rs index 875a086..c9fe9a1 100644 --- a/src/output.rs +++ b/src/output.rs @@ -328,63 +328,100 @@ mod csv { mod tests { use super::*; use serde_json::json; + use test_case::test_case; - #[test] - fn format_value_json() { - let val = json!({"id": 1, "name": "test"}); - let out = format_value(&val, &Format::Json); - assert!(out.contains("\"id\": 1")); - assert!(out.contains("\"name\": \"test\"")); - } + // -- format_value: all formats preserve data -- - #[test] - fn format_value_table_array() { + #[test_case(Format::Json ; "json")] + #[test_case(Format::Human ; "human")] + #[test_case(Format::Table ; "table")] + #[test_case(Format::Csv ; "csv")] + fn format_value_array_contains_data(fmt: Format) { let val = json!([ {"id": "1", "name": "alice"}, {"id": "2", "name": "bob"} ]); - let out = format_value(&val, &Format::Table); - assert!(out.contains("id")); - assert!(out.contains("name")); - 
assert!(out.contains("alice")); - assert!(out.contains("bob")); - assert!(out.contains("───")); + let out = format_value(&val, &fmt); + assert!(out.contains("alice"), "missing 'alice' in {fmt:?}"); + assert!(out.contains("bob"), "missing 'bob' in {fmt:?}"); } - #[test] - fn format_value_table_nested_flattening() { + #[test_case(Format::Json ; "json")] + #[test_case(Format::Human ; "human")] + #[test_case(Format::Table ; "table")] + #[test_case(Format::Csv ; "csv")] + fn format_value_single_object(fmt: Format) { + let val = json!({"status": "success", "score": 85}); + let out = format_value(&val, &fmt); + assert!(out.contains("success"), "missing 'success' in {fmt:?}"); + assert!(out.contains("85"), "missing '85' in {fmt:?}"); + } + + // -- nested flattening: Table and Csv flatten, Json/Human keep nested -- + + #[test_case(Format::Table ; "table")] + #[test_case(Format::Csv ; "csv")] + fn nested_objects_are_flattened(fmt: Format) { let val = json!([{"user": {"name": "Alice"}, "score": 42}]); - let out = format_value(&val, &Format::Table); - assert!(out.contains("user.name")); + let out = format_value(&val, &fmt); + assert!(out.contains("user.name"), "missing dot-notation in {fmt:?}"); assert!(out.contains("Alice")); assert!(out.contains("42")); } - #[test] - fn format_value_table_extracts_from_wrapper() { + #[test_case(Format::Json ; "json")] + #[test_case(Format::Human ; "human")] + fn nested_objects_preserved_in_json(fmt: Format) { + let val = json!([{"user": {"name": "Alice"}}]); + let out = format_value(&val, &fmt); + // Json/Human keep the nested structure, no dot-notation + assert!(out.contains("\"name\": \"Alice\"")); + assert!(!out.contains("user.name")); + } + + // -- wrapper extraction: Table and Csv extract arrays from response wrappers -- + + #[test_case(Format::Table ; "table")] + #[test_case(Format::Csv ; "csv")] + fn extracts_array_from_wrapper(fmt: Format) { let val = json!({ "total-findings": 100, "findings": [{"id": "a"}, {"id": "b"}], "_links": 
{"next": {"href": "http://..."}} }); + let out = format_value(&val, &fmt); + assert!(out.contains("a"), "missing item 'a' in {fmt:?}"); + assert!(out.contains("b"), "missing item 'b' in {fmt:?}"); + assert!( + !out.contains("total-findings"), + "metadata leaked in {fmt:?}" + ); + assert!(!out.contains("_links"), "metadata leaked in {fmt:?}"); + } + + // -- table-specific formatting -- + + #[test] + fn table_has_header_and_separator() { + let val = json!([{"id": "1", "name": "alice"}]); let out = format_value(&val, &Format::Table); assert!(out.contains("id")); - assert!(out.contains("a")); - assert!(out.contains("b")); - assert!(!out.contains("total-findings")); - assert!(!out.contains("_links")); + assert!(out.contains("name")); + assert!(out.contains("──")); } #[test] - fn format_value_table_single_object() { - let val = json!({"status": "success", "score": 85}); + fn table_truncates_long_values() { + let long = "x".repeat(100); + let val = json!([{"col": long}]); let out = format_value(&val, &Format::Table); - assert!(out.contains("status")); - assert!(out.contains("success")); + assert!(out.contains('…'), "long value should be truncated"); } + // -- csv-specific formatting -- + #[test] - fn format_value_csv_array() { + fn csv_has_header_row_and_data_rows() { let val = json!([ {"id": "1", "name": "alice"}, {"id": "2", "name": "bob"} @@ -397,52 +434,74 @@ mod tests { } #[test] - fn csv_escape_handles_commas_and_quotes() { - assert_eq!(csv::format(&json!([{"a": "hello,world"}])), "a\n\"hello,world\"\n"); - assert_eq!( - csv::format(&json!([{"a": "say \"hi\""}])), - "a\n\"say \"\"hi\"\"\"\n" + fn csv_escapes_special_characters() { + let val = json!([{"a": "hello,world"}, {"a": "say \"hi\""}]); + let out = format_value(&val, &Format::Csv); + let lines: Vec<&str> = out.lines().collect(); + assert_eq!(lines[1], "\"hello,world\""); + assert_eq!(lines[2], "\"say \"\"hi\"\"\""); + } + + // -- pagination: header behaviour across all formats -- + + #[test_case(Format::Table 
; "table")] + #[test_case(Format::Csv ; "csv")] + fn paginated_first_page_includes_header(fmt: Format) { + let val = json!([{"id": "1", "name": "a"}]); + let first = format_value_paginated(&val, &fmt, true); + assert!(first.contains("id"), "first page missing header in {fmt:?}"); + assert!( + first.contains("name"), + "first page missing header in {fmt:?}" ); } - #[test] - fn format_value_csv_nested() { - let val = json!([{"user": {"name": "Alice"}, "score": 42}]); - let out = format_value(&val, &Format::Csv); - assert!(out.contains("user.name")); - assert!(out.contains("Alice")); + #[test_case(Format::Table ; "table")] + #[test_case(Format::Csv ; "csv")] + fn paginated_subsequent_pages_skip_header(fmt: Format) { + let val = json!([{"id": "1", "name": "a"}]); + let first = format_value_paginated(&val, &fmt, true); + let second = format_value_paginated(&val, &fmt, false); + // Data present in both pages + assert!(second.contains("1"), "data missing on page 2 in {fmt:?}"); + // Header line count differs + let first_lines = first.lines().count(); + let second_lines = second.lines().count(); + assert!( + first_lines > second_lines, + "page 2 should have fewer lines (no header) in {fmt:?}" + ); } - #[test] - fn paginated_table_skips_header_on_subsequent_pages() { - let val = json!([{"id": "1"}]); - let first = format_value_paginated(&val, &Format::Table, true); - let second = format_value_paginated(&val, &Format::Table, false); - assert!(first.contains("id")); - assert!(first.contains("──")); - assert!(!second.contains("──")); + #[test_case(Format::Json ; "json")] + #[test_case(Format::Human ; "human")] + fn paginated_json_is_compact_ndjson(fmt: Format) { + let val = json!({"id": "1", "name": "test"}); + let out = format_value_paginated(&val, &fmt, true); + assert!( + !out.contains('\n'), + "NDJSON should be single-line in {fmt:?}" + ); } - #[test] - fn paginated_csv_skips_header_on_subsequent_pages() { - let val = json!([{"id": "1", "name": "a"}]); - let first = 
format_value_paginated(&val, &Format::Csv, true); - let second = format_value_paginated(&val, &Format::Csv, false); - assert!(first.starts_with("id,name")); - assert!(!second.contains("id,name")); + // -- empty input -- + + #[test_case(Format::Json ; "json")] + #[test_case(Format::Human ; "human")] + #[test_case(Format::Table ; "table")] + #[test_case(Format::Csv ; "csv")] + fn empty_array_does_not_panic(fmt: Format) { + let val = json!([]); + let _ = format_value(&val, &fmt); // should not panic } #[test] - fn paginated_json_is_compact() { - let val = json!({"id": "1"}); - let out = format_value_paginated(&val, &Format::Json, true); - assert!(!out.contains('\n')); + fn empty_array_table_shows_no_data() { + assert!(format_value(&json!([]), &Format::Table).contains("(no data)")); } #[test] - fn empty_array_produces_no_data() { - let val = json!([]); - assert!(format_value(&val, &Format::Table).contains("(no data)")); - assert!(format_value(&val, &Format::Csv).is_empty()); + fn empty_array_csv_is_empty() { + assert!(format_value(&json!([]), &Format::Csv).is_empty()); } } From 6f3ecdec7337d2ddd70191d27333d6f261a4db96 Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Wed, 18 Mar 2026 15:53:51 +0100 Subject: [PATCH 14/38] feat(dommyrock-analyzer-cli): rm *discovery.json file from git --- analyzer-discovery.json | 1606 --------------------------------------- 1 file changed, 1606 deletions(-) delete mode 100644 analyzer-discovery.json diff --git a/analyzer-discovery.json b/analyzer-discovery.json deleted file mode 100644 index aedb4a7..0000000 --- a/analyzer-discovery.json +++ /dev/null @@ -1,1606 +0,0 @@ -{ - "kind": "discovery#restDescription", - "discoveryVersion": "v1", - "id": "analyzer-api-routes:0.5.0", - "name": "analyzer-api-routes", - "version": "0.5.0", - "title": "Analyzer API routes", - "description": "", - "protocol": "rest", - "rootUrl": "", - "servicePath": "", - "schemas": { - "AiResult": { - "id": "AiResult", - "properties": { - "reasoning": { - 
"description": "AI reasoning, which lead to current status", - "type": "string" - }, - "sources": { - "description": "List of documents used by AI to produce current status.", - "items": { "$ref": "UserUploadedDocument" }, - "type": "array" - }, - "status": { "$ref": "AiStatus" }, - "user-action": { "$ref": "SuggestionResponse" } - }, - "type": "object" - }, - "AiStatus": { - "description": "Represents the status of a requirement determined by ai", - "enum": ["passed", "failed", "unknown"], - "id": "AiStatus", - "type": "string" - }, - "AiSuggestionStatus": { - "description": "Status of the AI suggestions computation.", - "id": "AiSuggestionStatus", - "properties": { "status": { "$ref": "Status" } }, - "type": "object" - }, - "AnalysisFilter": { - "id": "AnalysisFilter", - "properties": { - "query-name": { "$ref": "QueryName" }, - "values": { - "description": "Avaliable filter values with their count.", - "items": { "$ref": "FilterValue" }, - "type": "array" - } - }, - "type": "object" - }, - "AnalysisFindings": { - "description": "Wrapper type similar to AnalysisResult, but it contains only `findings`\nportion of analysis.", - "id": "AnalysisFindings" - }, - "AnalysisId": { - "description": "A wrapper struct `AnalysisId` around a UUID.\n ID in the analysis table.", - "format": "uuid", - "id": "AnalysisId", - "type": "string" - }, - "AnalysisInfo": { - "description": "Helper struct to define if a analysis should be by default enabled", - "id": "AnalysisInfo", - "properties": { "default": { "type": "boolean" }, "type": { "type": "string" } }, - "type": "object" - }, - "AnalysisOverview": { - "description": "Like [`ScanOverview`] but for single analysis.", - "id": "AnalysisOverview" - }, - "AnalysisQueryUnion": { - "description": "Union of all available query parameters for analyses.", - "id": "AnalysisQueryUnion" - }, - "AnalysisResultDTO": { - "description": "AnalysisResult but with count of all findings,\nbefore pagination was applied.", - "id": 
"AnalysisResultDTO", - "properties": { - "filters": { - "description": "Filters that can be used in this analysis.", - "type": "object" - }, - "findings": { "$ref": "AnalysisFindings" }, - "total-findings": { - "description": "Total count of findings _after_ filtering, but _before_ pagination.", - "format": "int64", - "type": "integer" - } - }, - "type": "object" - }, - "AnalysisScore": { - "description": "The score of an analysis,", - "id": "AnalysisScore", - "properties": { - "id": { "$ref": "AnalysisId" }, - "score": { "$ref": "Score" }, - "type": { "$ref": "AnalysisType" } - }, - "type": "object" - }, - "AnalysisState": { - "description": "A analysis that runs for one particular system image.", - "id": "AnalysisState", - "properties": { - "id": { "$ref": "AnalysisId" }, - "status": { "$ref": "AnalysisStatus" }, - "type": { "$ref": "ScanType" } - }, - "type": "object" - }, - "AnalysisStatus": { - "description": "Represents the current execution status of an analysis task.", - "enum": ["success", "pending", "in-progress", "canceled", "error"], - "id": "AnalysisStatus", - "type": "string" - }, - "AnalysisType": { - "description": "Type of the analysis", - "enum": [ - "info", - "kernel", - "cve", - "password-hash", - "hardening", - "malware", - "software-bom", - "crypto", - "capabilities", - "symbols", - "tasks", - "stack-overflow" - ], - "id": "AnalysisType", - "type": "string" - }, - "AnalyzerResult": { - "id": "AnalyzerResult", - "properties": { "status": { "$ref": "AnalyzerStatus" } }, - "type": "object" - }, - "AnalyzerStatus": { - "description": "Represents the status of a requirement determined by analyzer", - "enum": ["passed", "failed", "unknown", "not-applicable"], - "id": "AnalyzerStatus", - "type": "string" - }, - "ApiScanType": { - "description": "List of available analysis types per image type.\n\nThis includes the information if a analysis type should be scheduled by default or not.\n\n# Note\n\nThis is used by the frontend to determine which 
analysis has to be scheduled implicitly\nand which types are optional.", - "id": "ApiScanType" - }, - "BindFilter": { "enum": ["local", "global", "weak"], "id": "BindFilter", "type": "string" }, - "CapabilitiesOverview": { - "description": "Overview for Capability analysis.", - "id": "CapabilitiesOverview", - "properties": { - "capabilities": { - "description": "Capability found and their number of occurrences.", - "type": "object" - }, - "counts": { "$ref": "RiskLevelCount" }, - "executable_count": { - "description": "Total number executables.", - "format": "int64", - "type": "integer" - } - }, - "type": "object" - }, - "CapabilityParams": { - "id": "CapabilityParams", - "properties": { - "search": { "type": "string" }, - "severity-filter": { "items": { "$ref": "SeverityFilter" }, "type": "array" }, - "sort-by": { "$ref": "CapabilitySortBy" }, - "sort-ord": { "$ref": "SortOrd" } - }, - "type": "object" - }, - "CapabilitySortBy": { "enum": ["severity"], "id": "CapabilitySortBy", "type": "string" }, - "Checks": { - "description": "Represents the checks performed in the report", - "id": "Checks", - "properties": { - "failed": { - "description": "Number of checks that failed (determined either by analyzer or overwritten by the user)", - "format": "int32", - "type": "integer" - }, - "not-applicable": { - "description": "Number of not applicable requirements", - "format": "int32", - "type": "integer" - }, - "passed": { - "description": "Number of checks that passed (determined either by analyzer or overwritten by the user)", - "format": "int32", - "type": "integer" - }, - "suggestion-available": { - "description": "Number of checks for which AI suggestion is available.\n\nIt does not include user accepted or rejected suggestions.", - "format": "int32", - "type": "integer" - }, - "total": { - "description": "Total number of checks performed", - "format": "int32", - "type": "integer" - }, - "unknown": { - "description": "Number of checks that analyzer was unable to 
determine\n(or ai didn't give conclusive suggestion).\n\nNote that this will also include those requirements,\nthat have ai suggestion available, but user has not approved or rejected it yet.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "ComponentType": { - "enum": [ - "application", - "framework", - "library", - "container", - "operating-system", - "device", - "firmware", - "file" - ], - "id": "ComponentType", - "type": "string" - }, - "CreateObject": { - "description": "The request to create a new object.", - "id": "CreateObject", - "properties": { - "description": { "description": "Description of the object.", "type": "string" }, - "name": { "description": "Name of the object.", "type": "string" }, - "tags": { - "description": "Tags associated with the object.", - "items": { "type": "string" }, - "type": "array" - } - }, - "type": "object" - }, - "CryptoOverview": { - "description": "Overview for Crypto analysis.", - "id": "CryptoOverview", - "properties": { - "certificates": { - "description": "Number of certificates found.", - "format": "int64", - "type": "integer" - }, - "private_keys": { - "description": "Number of private keys found.", - "format": "int64", - "type": "integer" - }, - "public_keys": { - "description": "Number of public keys found.", - "format": "int64", - "type": "integer" - } - }, - "type": "object" - }, - "CryptoParams": { - "id": "CryptoParams", - "properties": { - "search": { "type": "string" }, - "sort-by": { "$ref": "CryptoSortBy" }, - "sort-ord": { "$ref": "SortOrd" }, - "type-filter": { "items": { "$ref": "CryptoTypeFilter" }, "type": "array" } - }, - "type": "object" - }, - "CryptoSortBy": { - "enum": ["type", "key-size", "filename", "path", "issuer"], - "id": "CryptoSortBy", - "type": "string" - }, - "CryptoTypeFilter": { - "enum": ["certificate", "private-key", "public-key"], - "id": "CryptoTypeFilter", - "type": "string" - }, - "CveOverview": { - "description": "Overview for Cve analysis.", - "id": 
"CveOverview", - "properties": { - "counts": { "$ref": "CveSeverityCount" }, - "products": { - "description": "Cve counts for each \"product\" (binary, library, etc.).", - "type": "object" - }, - "total": { "description": "Sum of all `counts`.", "format": "int64", "type": "integer" } - }, - "type": "object" - }, - "CveParams": { - "id": "CveParams", - "properties": { - "patch-filter": { "items": { "$ref": "CvePatchFilter" }, "type": "array" }, - "search": { "type": "string" }, - "severity-filter": { "items": { "$ref": "CveSeverityFilter" }, "type": "array" }, - "sort-by": { "$ref": "CveSortBy" }, - "sort-ord": { "$ref": "SortOrd" } - }, - "type": "object" - }, - "CvePatchFilter": { - "enum": ["available", "unavailable"], - "id": "CvePatchFilter", - "type": "string" - }, - "CveSeverityCount": { - "description": "Maps CVE severity to its count", - "id": "CveSeverityCount", - "properties": { - "critical": { "format": "int64", "type": "integer" }, - "high": { "format": "int64", "type": "integer" }, - "low": { "format": "int64", "type": "integer" }, - "medium": { "format": "int64", "type": "integer" }, - "unknown": { "format": "int64", "type": "integer" } - }, - "type": "object" - }, - "CveSeverityFilter": { - "enum": ["low", "medium", "high", "critical"], - "id": "CveSeverityFilter", - "type": "string" - }, - "CveSortBy": { "enum": ["severity"], "id": "CveSortBy", "type": "string" }, - "CyberResilienceActReport": { - "description": "Represents a Cyber Resilience Act report", - "id": "CyberResilienceActReport", - "properties": { - "checks": { "$ref": "Checks" }, - "created-at": { - "description": "Date and time when the report was created.", - "format": "date-time", - "type": "string" - }, - "name": { "description": "Name of the report.", "type": "string" }, - "sections": { - "description": "List of categories in the report.", - "items": { "$ref": "Section" }, - "type": "array" - }, - "updated-at": { - "description": "Date and time of last report update.\n\nIf no update 
has happened yet, for example after report was generated\nand before any user overwrite, this will be `null`.", - "format": "date-time", - "type": "string" - } - }, - "type": "object" - }, - "DockerAnalysis": { - "description": "Represents different types of analyses for Docker containers.", - "enum": [ - "info", - "cve", - "password-hash", - "crypto", - "software-bom", - "malware", - "hardening", - "capabilities" - ], - "id": "DockerAnalysis", - "type": "string" - }, - "DockerInfo": { - "description": "Container metadata information\n\nRepresents various metadata attributes of a container image", - "id": "DockerInfo", - "properties": { - "arch": { - "description": "List of supported CPU architectures for the container", - "items": { "type": "string" }, - "type": "array" - }, - "ctime": { - "description": "List of creation timestamps for container layers", - "items": { "type": "string" }, - "type": "array" - }, - "env": { - "description": "List of environment variables defined in the container", - "items": { "type": "string" }, - "type": "array" - }, - "history": { - "description": "List of commands used to build the container layers", - "items": { "$ref": "History" }, - "type": "array" - }, - "os": { - "description": "List of supported operating systems for the container", - "items": { "type": "string" }, - "type": "array" - }, - "os_name": { - "description": "Name of the base operating system used in the container", - "type": "string" - }, - "os_version": { - "description": "Version of the base operating system used in the container", - "type": "string" - }, - "tags": { - "description": "List of container image tags associated with the image", - "items": { "type": "string" }, - "type": "array" - } - }, - "type": "object" - }, - "DockerInfoResult": { "description": "Info result for docker image", "id": "DockerInfoResult" }, - "DocumentListItem": { - "description": "A single document entry in a listing.", - "id": "DocumentListItem", - "properties": { - "file-name": 
{ - "description": "Original file name, serves as the unique key within a scan's document storage", - "type": "string" - } - }, - "type": "object" - }, - "DocumentListResponse": { - "description": "A list of documents associated with a scan.", - "id": "DocumentListResponse", - "properties": { "documents": { "items": { "$ref": "DocumentListItem" }, "type": "array" } }, - "type": "object" - }, - "DocumentUploadResponse": { - "description": "The response after successfully uploading a document.", - "id": "DocumentUploadResponse", - "properties": { - "file-name": { - "description": "Original file name, serves as the unique key within a scan's document storage", - "type": "string" - } - }, - "type": "object" - }, - "FeaturesFilter": { - "enum": [ - "seccomp", - "seccomp-filter", - "security-network", - "stack-protector", - "fortify-source", - "vmap-kernel-stack", - "usercopy", - "heap-freelist-obfuscation", - "executable-memory-protection", - "kaslr", - "apparmor", - "selinux", - "smack", - "tomoyo", - "yama" - ], - "id": "FeaturesFilter", - "type": "string" - }, - "FilterValue": { - "id": "FilterValue", - "properties": { - "count": { - "description": "Count of findings matching this value for current filter options.", - "format": "int64", - "type": "integer" - }, - "value": { - "description": "Filter value that can be passed in query paramters.", - "type": "string" - } - }, - "type": "object" - }, - "HardeningOverview": { - "description": "Overview for Hardening analysis.", - "id": "HardeningOverview", - "properties": { - "counts": { "$ref": "HardeningSeverityCount" }, - "total": { "description": "Sum of all `counts`.", "format": "int64", "type": "integer" } - }, - "type": "object" - }, - "HardeningParams": { - "id": "HardeningParams", - "properties": { - "search": { "type": "string" }, - "severity-filter": { "items": { "$ref": "HardeningSeverityFilter" }, "type": "array" }, - "sort-by": { "$ref": "HardeningSortBy" }, - "sort-ord": { "$ref": "SortOrd" } - }, - "type": 
"object" - }, - "HardeningSeverityCount": { - "description": "Maps Hardening severity to its count", - "id": "HardeningSeverityCount", - "properties": { - "high": { "format": "int64", "type": "integer" }, - "low": { "format": "int64", "type": "integer" }, - "medium": { "format": "int64", "type": "integer" } - }, - "type": "object" - }, - "HardeningSeverityFilter": { - "enum": ["low", "medium", "high"], - "id": "HardeningSeverityFilter", - "type": "string" - }, - "HardeningSortBy": { - "enum": ["severity", "filename", "canary", "nx", "pie", "relro", "fortify"], - "id": "HardeningSortBy", - "type": "string" - }, - "HealthStatus": { - "description": "Health status of an application.\n\nIt contains an overall `healthy` field but can also provide\nthe status of individual components or an error message.\nIf the status is not healthy a Http status code of 500 will be returned.", - "id": "HealthStatus", - "properties": { "healthy": { "type": "boolean" }, "message": { "type": "string" } }, - "type": "object" - }, - "History": { - "id": "History", - "properties": { - "created": { "format": "date-time", "type": "string" }, - "created_by": { "type": "string" }, - "empty_layer": { "type": "boolean" } - }, - "type": "object" - }, - "IdfAnalysis": { - "description": "Represents analyses specific to IDF (IoT Device Framework) targets.", - "enum": ["info", "cve", "software-bom", "symbols", "tasks", "stack-overflow"], - "id": "IdfAnalysis", - "type": "string" - }, - "IdfInfo": { - "description": "IdfInfo analysis entry for idf image", - "id": "IdfInfo", - "properties": { - "arch": { "description": "Architecture type", "type": "string" }, - "compiler": { - "description": "Compiler name and version used to create this image", - "type": "string" - }, - "freertos": { "description": "freertos version", "type": "string" }, - "idf": { "description": "idf version", "type": "string" } - }, - "type": "object" - }, - "IdfInfoResult": { "description": "Info result for idf image", "id": 
"IdfInfoResult" }, - "IdfSymbolParams": { - "id": "IdfSymbolParams", - "properties": { - "bind-filter": { "items": { "$ref": "BindFilter" }, "type": "array" }, - "search": { "type": "string" }, - "sort-by": { "$ref": "IdfSymbolSortBy" }, - "sort-ord": { "$ref": "SortOrd" }, - "type-filter": { "items": { "$ref": "TypeFilter" }, "type": "array" } - }, - "type": "object" - }, - "IdfSymbolSortBy": { "enum": ["name"], "id": "IdfSymbolSortBy", "type": "string" }, - "IdfTaskParams": { - "id": "IdfTaskParams", - "properties": { - "search": { "type": "string" }, - "sort-by": { "$ref": "IdfTaskSortBy" }, - "sort-ord": { "$ref": "SortOrd" } - }, - "type": "object" - }, - "IdfTaskSortBy": { "enum": ["function", "name"], "id": "IdfTaskSortBy", "type": "string" }, - "Image": { - "description": "A image on which a scan is executed", - "id": "Image", - "properties": { - "file_name": { - "description": "The original name of the file as provided when the image was uploaded.\nThis is typically used for display or reference purposes and may not be unique.", - "type": "string" - }, - "id": { "$ref": "ImageId" } - }, - "type": "object" - }, - "ImageId": { - "description": "A wrapper struct `ImageId` around a UUID.\n ID in the images table.", - "format": "uuid", - "id": "ImageId", - "type": "string" - }, - "ImageType": { - "description": "Type of the image used in scan", - "enum": ["linux", "docker", "idf"], - "id": "ImageType", - "type": "string" - }, - "Info": { "id": "Info" }, - "InfoOverview": { "id": "InfoOverview" }, - "KernelOverview": { - "description": "Overview for Kernel analysis.", - "id": "KernelOverview", - "properties": { - "count": { - "description": "Number of kernel security features enabled.", - "format": "int64", - "type": "integer" - } - }, - "type": "object" - }, - "KernelParams": { - "id": "KernelParams", - "properties": { - "features-filter": { "items": { "$ref": "FeaturesFilter" }, "type": "array" }, - "sort-by": { "$ref": "KernelSortBy" }, - "sort-ord": { 
"$ref": "SortOrd" }, - "status-filter": { "items": { "$ref": "StatusFilter" }, "type": "array" } - }, - "type": "object" - }, - "KernelSortBy": { "enum": ["features", "status"], "id": "KernelSortBy", "type": "string" }, - "LinuxAnalysis": { - "description": "Represents different types of analyses that can be performed on a Linux system.", - "enum": [ - "info", - "kernel", - "cve", - "password-hash", - "crypto", - "software-bom", - "malware", - "hardening", - "capabilities" - ], - "id": "LinuxAnalysis", - "type": "string" - }, - "LinuxInfo": { - "description": "Represents the information about the system", - "id": "LinuxInfo", - "properties": { - "arch": { "description": "The tags associated with the system", "type": "string" }, - "banner": { "description": "The operating system name", "type": "string" }, - "kernel_version": { "description": "The kernel version", "type": "string" }, - "libc": { "description": "The operating system version", "type": "string" } - }, - "type": "object" - }, - "LinuxInfoResult": { "description": "Info result for linux image", "id": "LinuxInfoResult" }, - "MalwareOverview": { - "description": "Overview for Malware analysis.", - "id": "MalwareOverview", - "properties": { - "count": { - "description": "Number of malware detected.", - "format": "int64", - "type": "integer" - } - }, - "type": "object" - }, - "MalwareParams": { - "id": "MalwareParams", - "properties": { "sort-by": { "$ref": "MalwareSortBy" }, "sort-ord": { "$ref": "SortOrd" } }, - "type": "object" - }, - "MalwareSortBy": { "enum": ["filename"], "id": "MalwareSortBy", "type": "string" }, - "NewScanResponse": { - "description": "The response if a new scan is created.", - "id": "NewScanResponse", - "properties": { "id": { "$ref": "ScanId" } }, - "type": "object" - }, - "ObjectId": { - "description": "A wrapper struct `ObjectId` around a UUID.\n ID in the objects table.", - "format": "uuid", - "id": "ObjectId", - "type": "string" - }, - "PasswordHashOverview": { - "description": 
"Overview for Password Hash analysis.", - "id": "PasswordHashOverview", - "properties": { - "count": { - "description": "Number of passwords decoded.", - "format": "int64", - "type": "integer" - } - }, - "type": "object" - }, - "PasswordHashParams": { - "id": "PasswordHashParams", - "properties": { - "severity-filter": { "items": { "$ref": "PasswordHashSeverityFilter" }, "type": "array" }, - "sort-by": { "$ref": "PasswordHashSortBy" }, - "sort-ord": { "$ref": "SortOrd" } - }, - "type": "object" - }, - "PasswordHashSeverityFilter": { - "enum": ["medium", "high"], - "id": "PasswordHashSeverityFilter", - "type": "string" - }, - "PasswordHashSortBy": { - "enum": ["severity", "username"], - "id": "PasswordHashSortBy", - "type": "string" - }, - "QueryName": { - "description": "Query parameter names for analysis filter types.\n\nNOTE: serialization values *MUST* match serialization structure\nof filter fields in QueryParameter types.", - "enum": ["license-filter"], - "id": "QueryName", - "type": "string" - }, - "Requirement": { - "description": "Represents a requirement in the report", - "id": "Requirement", - "properties": { - "advice": { - "description": "Human readable hint explaining how to pass this requirement.\n\nIn the case of \"with-suggestion\" status,\nthis will be the advice for the original status.", - "type": "string" - }, - "ai-suggestion": { "$ref": "AiResult" }, - "analyzer": { "$ref": "AnalyzerResult" }, - "description": { "description": "Description of the requirement.", "type": "string" }, - "explanation": { - "description": "Human readable explanation of the status of this requirement.\n\nIn the case of \"with-suggestion\" status,\nthis will be the explanation for the original status.", - "type": "string" - }, - "id": { "$ref": "RequirementId" }, - "policy-ref": { - "description": "Reference to the policy associated with the requirement.", - "type": "string" - }, - "status": { "$ref": "RequirementStatus" }, - "user-overwrite": { "$ref": "UserResult" } 
- }, - "type": "object" - }, - "RequirementId": { - "description": "Id of Requirement\n\nThis id will be used to communicate between backend and fronted the semantic\nmeaning of requirement, as well as for overwriting specific requirement status by user.", - "enum": [ - "cve-exploits", - "password-strength", - "security-updates", - "update-notifications", - "access-control", - "unauthorized-access", - "data-encryption", - "data-integrity", - "data-collection", - "essential-availability", - "minimise-impact", - "attack-surfaces", - "attack-reduction", - "activity-monitoring", - "data-removal", - "vulns-documentation", - "vulns-security-updates", - "update-security-and-automation", - "security-testing-and-review", - "fixed-vulns-disclosure", - "vulns-coordinated-disclosure", - "vulns-reporting-contact", - "security-updates-dissemination" - ], - "id": "RequirementId", - "type": "string" - }, - "RequirementOverwrite": { - "description": "User action on a CRA requirement — either a manual overwrite or an AI suggestion response.", - "id": "RequirementOverwrite" - }, - "RequirementStatus": { - "description": "Overall status of the requirement\ncomputed by taking into account all user interactions.", - "enum": ["passed", "failed", "unknown", "unknown-with-suggestion", "not-applicable"], - "id": "RequirementStatus", - "type": "string" - }, - "RiskLevelCount": { - "description": "Count all different risk levels of the analysis.", - "id": "RiskLevelCount", - "properties": { - "critical": { "format": "int64", "type": "integer" }, - "high": { "format": "int64", "type": "integer" }, - "low": { "format": "int64", "type": "integer" }, - "medium": { "format": "int64", "type": "integer" }, - "none": { "format": "int64", "type": "integer" }, - "unknown": { "format": "int64", "type": "integer" } - }, - "type": "object" - }, - "SbomParams": { - "id": "SbomParams", - "properties": { - "license-filter": { "items": { "type": "string" }, "type": "array" }, - "search": { "type": "string" }, 
- "sort-by": { "$ref": "SbomSortBy" }, - "sort-ord": { "$ref": "SortOrd" }, - "type-filter": { "items": { "$ref": "ComponentType" }, "type": "array" } - }, - "type": "object" - }, - "SbomSortBy": { "enum": ["name"], "id": "SbomSortBy", "type": "string" }, - "Scan": { - "description": "Represents a scan that aggregates multiple analyses executed on a particular image.", - "id": "Scan", - "properties": { - "analysis": { - "description": "All analyses processed as part of this scan.", - "items": { "$ref": "AnalysisState" }, - "type": "array" - }, - "created": { - "description": "The date and time when the scan was initiated.", - "format": "date-time", - "type": "string" - }, - "id": { "$ref": "ScanId" }, - "image": { "$ref": "Image" }, - "image_type": { "$ref": "ImageType" }, - "info": { "$ref": "Info" }, - "score": { "$ref": "ScanScore" } - }, - "type": "object" - }, - "ScanId": { - "description": "A wrapper struct `ScanId` around a UUID.\n ID in the scans table.", - "format": "uuid", - "id": "ScanId", - "type": "string" - }, - "ScanOverview": { - "description": "Response object for `/scans/:id/overview` endpoint.\n\nSee [module's](super) documentation for more information\nabout schema and computation logic.", - "id": "ScanOverview", - "properties": { - "capabilities": { "$ref": "CapabilitiesOverview" }, - "crypto": { "$ref": "CryptoOverview" }, - "cve": { "$ref": "CveOverview" }, - "hardening": { "$ref": "HardeningOverview" }, - "info": { "$ref": "InfoOverview" }, - "kernel": { "$ref": "KernelOverview" }, - "malware": { "$ref": "MalwareOverview" }, - "password-hash": { "$ref": "PasswordHashOverview" }, - "software-bom": { "$ref": "SoftwareBOMOverview" }, - "stack-overflow": { "$ref": "StackOverflowOverview" }, - "symbols": { "$ref": "SymbolsOverview" }, - "tasks": { "$ref": "TasksOverview" } - }, - "type": "object" - }, - "ScanScore": { - "description": "The calculate score with an weighted algorithm over all analysis.", - "id": "ScanScore", - "properties": { - 
"score": { "$ref": "Score" }, - "scores": { - "description": "Individual analyses scores.", - "items": { "$ref": "AnalysisScore" }, - "type": "array" - } - }, - "type": "object" - }, - "ScanStatus": { - "description": "The status of a [`Scan`](analyzer_db::repository::scan::Scan)\nand all the [`Analysis`](analyzer_db::repository::analysis::Analysis).", - "id": "ScanStatus", - "properties": { "id": { "$ref": "ScanId" }, "status": { "$ref": "AnalysisStatus" } }, - "type": "object" - }, - "ScanType": { - "description": "Represents a unified type for analyses across all supported images.", - "id": "ScanType" - }, - "Score": { - "description": "Represents a security impact score, ranging from 0 to 100.\n\nA higher value indicates a greater security impact.", - "format": "int32", - "id": "Score", - "type": "integer" - }, - "Section": { - "description": "Represents a group of requirements, grouped by [SubSection]s.", - "id": "Section", - "properties": { - "label": { "description": "Name of the requirement", "type": "string" }, - "policy-ref": { - "description": "Reference to the policy associated with the requirement", - "type": "string" - }, - "sub-sections": { - "description": "List of sub-requirements or checks associated with this requirement", - "items": { "$ref": "SubSection" }, - "type": "array" - } - }, - "type": "object" - }, - "SeverityFilter": { - "enum": ["none", "low", "medium", "high", "critical", "unknown"], - "id": "SeverityFilter", - "type": "string" - }, - "SoftwareBOMOverview": { - "description": "Overview for Software BOM analysis.", - "id": "SoftwareBOMOverview", - "properties": { - "count": { - "description": "Total number of software BOM entries.", - "format": "int64", - "type": "integer" - }, - "licenses": { - "description": "License type and their number of occurrences.", - "type": "object" - } - }, - "type": "object" - }, - "SortOrd": { "enum": ["asc", "desc"], "id": "SortOrd", "type": "string" }, - "StackOverflowOverview": { - "description": 
"Overview for Stack Overflow analysis.", - "id": "StackOverflowOverview", - "properties": { - "method": { - "description": "Name of the protection method used,\nor `None` if stack overflow protection is not enabled.", - "type": "string" - } - }, - "type": "object" - }, - "Status": { - "description": "Status of the AI suggestions computation.", - "enum": ["in-progress", "finished"], - "id": "Status", - "type": "string" - }, - "StatusFilter": { "enum": ["enabled", "disabled"], "id": "StatusFilter", "type": "string" }, - "SubSection": { - "description": "Represents a group of requirements", - "id": "SubSection", - "properties": { - "label": { "description": "Name of the requirement", "type": "string" }, - "requirements": { - "description": "List of sub-requirements or checks associated with this requirement", - "items": { "$ref": "Requirement" }, - "type": "array" - } - }, - "type": "object" - }, - "SuggestionResponse": { - "description": "User response to AI suggestion.", - "enum": ["accepted", "rejected"], - "id": "SuggestionResponse", - "type": "string" - }, - "SymbolsOverview": { - "description": "Overview for Symbol analysis.", - "id": "SymbolsOverview", - "properties": { - "count": { - "description": "Number of analyzed symbols.", - "format": "int64", - "type": "integer" - } - }, - "type": "object" - }, - "TasksOverview": { - "description": "Overview for Task analysis.", - "id": "TasksOverview", - "properties": { - "count": { - "description": "Number of analysed tasks.", - "format": "int64", - "type": "integer" - } - }, - "type": "object" - }, - "TypeFilter": { - "enum": ["sect", "func", "obj", "file", "notype"], - "id": "TypeFilter", - "type": "string" - }, - "UpdateObject": { - "description": "The request to update fields on an [`Object`].", - "id": "UpdateObject", - "properties": { - "description": { "description": "Description of the object.", "type": "string" }, - "favorite": { - "description": "Sets if the object is a favorite or not.", - "type": "boolean" 
- }, - "name": { "description": "Name of the object.", "type": "string" }, - "tags": { - "description": "The tags associated with the object.", - "items": { "type": "string" }, - "type": "array" - } - }, - "type": "object" - }, - "UserId": { - "description": "A wrapper struct `UserId` around a UUID.\n ID in the users table.", - "format": "uuid", - "id": "UserId", - "type": "string" - }, - "UserResult": { - "id": "UserResult", - "properties": { "status": { "$ref": "UserStatus" } }, - "type": "object" - }, - "UserStatus": { - "description": "Represents the status of a requirement overwritten by the user", - "enum": ["passed", "failed"], - "id": "UserStatus", - "type": "string" - }, - "UserUploadedDocument": { - "description": "Description of the user provided file\nused by ai to give its suggestion.", - "id": "UserUploadedDocument", - "properties": { - "filename": { "description": "Name of the user uploaded file.", "type": "string" } - }, - "type": "object" - } - }, - "resources": { - "api": { - "resources": { - "health": { - "methods": { - "list": { - "id": "analyzer-api-routes.api.health.list", - "httpMethod": "GET", - "path": "api/health", - "description": "Returns if the service is in an healthy state.", - "response": { "$ref": "HealthStatus" } - } - } - }, - "objects": { - "methods": { - "create": { - "id": "analyzer-api-routes.api.objects.create", - "httpMethod": "POST", - "path": "api/objects", - "description": "Create new object", - "request": { "$ref": "CreateObject" } - }, - "delete": { - "id": "analyzer-api-routes.api.objects.delete", - "httpMethod": "DELETE", - "path": "api/objects/{id}", - "description": "Deletes a object and all related scans.", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Unique identifier of the object to delete" - } - }, - "parameterOrder": ["id"] - }, - "get": { - "id": "analyzer-api-routes.api.objects.get", - "httpMethod": "GET", - "path": "api/objects/{id}", - 
"description": "Retrieve an object by its ID.", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Object ID" - } - }, - "parameterOrder": ["id"] - }, - "list": { - "id": "analyzer-api-routes.api.objects.list", - "httpMethod": "GET", - "path": "api/objects", - "description": "Retrieve a list of all objects of the current user.", - "parameters": { - "end_timestamp": { - "type": "string", - "required": false, - "location": "query", - "description": "End timestamp for pagination.", - "format": "datetime" - }, - "id": { - "type": "string", - "required": false, - "location": "query", - "description": "Pagination cursor (UUID).", - "format": "uuid" - }, - "limit": { - "type": "integer", - "required": false, - "location": "query", - "description": "Maximum number of items per page.", - "format": "int32" - }, - "start_timestamp": { - "type": "string", - "required": false, - "location": "query", - "description": "Start timestamp for pagination.", - "format": "datetime" - } - } - }, - "update": { - "id": "analyzer-api-routes.api.objects.update", - "httpMethod": "PUT", - "path": "api/objects/{id}", - "description": "Update an object", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Object ID" - } - }, - "parameterOrder": ["id"], - "request": { "$ref": "UpdateObject" } - } - }, - "resources": { - "scans": { - "methods": { - "list": { - "id": "analyzer-api-routes.api.objects.scans.list", - "httpMethod": "GET", - "path": "api/objects/{id}/scans", - "description": "Those scans could be", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Object ID" - } - }, - "parameterOrder": ["id"] - } - } - } - } - }, - "scans": { - "methods": { - "create": { - "id": "analyzer-api-routes.api.scans.create", - "httpMethod": "POST", - "path": "api/scans", - "description": "Schedule a new scan.", - "response": { "$ref": 
"NewScanResponse" } - }, - "delete": { - "id": "analyzer-api-routes.api.scans.delete", - "httpMethod": "DELETE", - "path": "api/scans/{id}", - "description": "Delete a scan.", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Scan ID" - } - }, - "parameterOrder": ["id"] - }, - "get": { - "id": "analyzer-api-routes.api.scans.get", - "httpMethod": "GET", - "path": "api/scans/{id}", - "description": "Returns a scan.", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Scan ID" - } - }, - "parameterOrder": ["id"], - "response": { "$ref": "Scan" } - }, - "list": { - "id": "analyzer-api-routes.api.scans.list", - "httpMethod": "GET", - "path": "api/scans", - "description": "Retrieve a list of scans." - } - }, - "resources": { - "cancel": { - "methods": { - "create": { - "id": "analyzer-api-routes.api.scans.cancel.create", - "httpMethod": "POST", - "path": "api/scans/{id}/cancel", - "description": "This can be used to cancel an already pending or running scan.", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Scan ID" - } - }, - "parameterOrder": ["id"] - } - } - }, - "compliance-check": { - "resources": { - "cyber-resilience-act": { - "methods": { - "list": { - "id": "analyzer-api-routes.api.scans.compliance-check.cyber-resilience-act.list", - "httpMethod": "GET", - "path": "api/scans/{id}/compliance-check/cyber-resilience-act", - "description": "Computes compliance with Cyber Resilience Act", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Scan ID", - "format": "uuid" - } - }, - "parameterOrder": ["id"], - "response": { "$ref": "CyberResilienceActReport" } - } - }, - "resources": { - "ai-suggestion": { - "resources": { - "begin": { - "methods": { - "create": { - "id": 
"analyzer-api-routes.api.scans.compliance-check.cyber-resilience-act.ai-suggestion.begin.create", - "httpMethod": "POST", - "path": "api/scans/{id}/compliance-check/cyber-resilience-act/ai-suggestion/begin", - "description": "Triggers CRA AI suggestion using user-provided documents.", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Scan ID", - "format": "uuid" - } - }, - "parameterOrder": ["id"] - } - } - }, - "status": { - "methods": { - "list": { - "id": "analyzer-api-routes.api.scans.compliance-check.cyber-resilience-act.ai-suggestion.status.list", - "httpMethod": "GET", - "path": "api/scans/{id}/compliance-check/cyber-resilience-act/ai-suggestion/status", - "description": "Returns status of the CRA AI suggestion.", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Scan ID", - "format": "uuid" - } - }, - "parameterOrder": ["id"], - "response": { "$ref": "AiSuggestionStatus" } - } - } - } - } - }, - "overwrite": { - "methods": { - "overwrite_compliance_check_requirement": { - "id": "analyzer-api-routes.api.scans.compliance-check.cyber-resilience-act.overwrite.overwrite_compliance_check_requirement", - "httpMethod": "PUT", - "path": "api/scans/{id}/compliance-check/cyber-resilience-act/overwrite", - "description": "Overwrites compliance check requirement", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Scan ID", - "format": "uuid" - } - }, - "parameterOrder": ["id"], - "request": { "$ref": "RequirementOverwrite" } - } - } - }, - "report": { - "methods": { - "list": { - "id": "analyzer-api-routes.api.scans.compliance-check.cyber-resilience-act.report.list", - "httpMethod": "GET", - "path": "api/scans/{id}/compliance-check/cyber-resilience-act/report", - "description": "Downloads Cyber Resilience Act compliance report as PDF", - "parameters": { - "id": { - "type": "string", - "required": 
true, - "location": "path", - "description": "Scan ID", - "format": "uuid" - } - }, - "parameterOrder": ["id"] - } - } - } - } - } - } - }, - "documents": { - "methods": { - "create": { - "id": "analyzer-api-routes.api.scans.documents.create", - "httpMethod": "POST", - "path": "api/scans/{id}/documents", - "description": "Upload a document for a scan.", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Scan ID" - } - }, - "parameterOrder": ["id"], - "response": { "$ref": "DocumentUploadResponse" } - }, - "delete": { - "id": "analyzer-api-routes.api.scans.documents.delete", - "httpMethod": "DELETE", - "path": "api/scans/{id}/documents/{file_name}", - "description": "Delete a single document for a scan.", - "parameters": { - "file_name": { - "type": "string", - "required": true, - "location": "path", - "description": "Document file name" - }, - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Scan ID" - } - }, - "parameterOrder": ["id", "file_name"] - }, - "delete_documents": { - "id": "analyzer-api-routes.api.scans.documents.delete_documents", - "httpMethod": "DELETE", - "path": "api/scans/{id}/documents", - "description": "Delete all documents for a scan.", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Scan ID" - } - }, - "parameterOrder": ["id"] - }, - "list": { - "id": "analyzer-api-routes.api.scans.documents.list", - "httpMethod": "GET", - "path": "api/scans/{id}/documents", - "description": "List documents for a scan.", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Scan ID" - } - }, - "parameterOrder": ["id"], - "response": { "$ref": "DocumentListResponse" } - } - } - }, - "overview": { - "methods": { - "get": { - "id": "analyzer-api-routes.api.scans.overview.get", - "httpMethod": "GET", - "path": "api/scans/{scan_id}/overview/{analysis_id}", - 
"description": "Returns an overview of one analysis.", - "parameters": { - "analysis_id": { - "type": "string", - "required": true, - "location": "path", - "description": "Analysis ID" - }, - "scan_id": { - "type": "string", - "required": true, - "location": "path", - "description": "Scan ID" - } - }, - "parameterOrder": ["scan_id", "analysis_id"], - "response": { "$ref": "AnalysisOverview" } - }, - "list": { - "id": "analyzer-api-routes.api.scans.overview.list", - "httpMethod": "GET", - "path": "api/scans/{id}/overview", - "description": "Returns an aggregated overview of all analysis executed for one scan.", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Scan ID" - } - }, - "parameterOrder": ["id"], - "response": { "$ref": "ScanOverview" } - } - } - }, - "report": { - "methods": { - "list": { - "id": "analyzer-api-routes.api.scans.report.list", - "httpMethod": "GET", - "path": "api/scans/{id}/report", - "description": "Downloads a PDF security report for a scan.", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Scan ID", - "format": "uuid" - } - }, - "parameterOrder": ["id"] - } - } - }, - "results": { - "methods": { - "get": { - "id": "analyzer-api-routes.api.scans.results.get", - "httpMethod": "GET", - "path": "api/scans/{scan_id}/results/{analysis_id}", - "description": "Retrieve the results of one specific analysis of a scan.", - "parameters": { - "analysis_id": { - "type": "string", - "required": true, - "location": "path", - "description": "Analysis ID" - }, - "page": { - "type": "integer", - "required": false, - "location": "query", - "description": "Page number (must be > 0). If provided, `per-page` must also be provided.", - "format": "int32" - }, - "per-page": { - "type": "integer", - "required": false, - "location": "query", - "description": "Items per page (must be > 0). 
If provided, `page` must also be provided.", - "format": "int32" - }, - "query": { - "type": "string", - "required": true, - "location": "query", - "description": "Query parameters depend on the analysis type. Supported shapes: IDF task, other analysis types." - }, - "scan_id": { - "type": "string", - "required": true, - "location": "path", - "description": "Scan ID" - } - }, - "parameterOrder": ["scan_id", "analysis_id"], - "response": { "$ref": "AnalysisResultDTO" } - } - } - }, - "sbom": { - "methods": { - "list": { - "id": "analyzer-api-routes.api.scans.sbom.list", - "httpMethod": "GET", - "path": "api/scans/{id}/sbom", - "description": "Downloads the SBOM (CycloneDX JSON) for a scan.", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Scan ID", - "format": "uuid" - } - }, - "parameterOrder": ["id"] - } - } - }, - "score": { - "methods": { - "list": { - "id": "analyzer-api-routes.api.scans.score.list", - "httpMethod": "GET", - "path": "api/scans/{id}/score", - "description": "Returns a security score of all successful finished analyses with their individual scores included.", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Scan ID" - } - }, - "parameterOrder": ["id"], - "response": { "$ref": "ScanScore" } - } - } - }, - "status": { - "methods": { - "list": { - "id": "analyzer-api-routes.api.scans.status.list", - "httpMethod": "GET", - "path": "api/scans/{id}/status", - "description": "Returns the status of a scan.", - "parameters": { - "id": { - "type": "string", - "required": true, - "location": "path", - "description": "Scan ID" - } - }, - "parameterOrder": ["id"], - "response": { "$ref": "ScanStatus" } - } - } - }, - "types": { - "methods": { - "list": { - "id": "analyzer-api-routes.api.scans.types.list", - "httpMethod": "GET", - "path": "api/scans/types", - "description": "Returns a list of all available analysis types for each different 
image." - } - } - } - } - } - } - } - } -} From e8affcdb04c37a0c8bc7562f4bf5395989c5093a Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Tue, 24 Mar 2026 14:45:50 +0100 Subject: [PATCH 15/38] feat(dommyrock-analyzer-cli): claude skills and context discovery logic --- CONTEXT.md | 21 ++++ Cargo.toml | 1 + src/commands/init_agent.rs | 204 +++++++++++++++++++++++++++++++++++++ src/commands/mod.rs | 1 + src/discovery.rs | 54 +++++++--- src/main.rs | 10 ++ tests/discovery_cache.rs | 103 +++++++++++++++++++ 7 files changed, 378 insertions(+), 16 deletions(-) create mode 100644 src/commands/init_agent.rs create mode 100644 tests/discovery_cache.rs diff --git a/CONTEXT.md b/CONTEXT.md index 961f11a..39f99e7 100644 --- a/CONTEXT.md +++ b/CONTEXT.md @@ -8,6 +8,27 @@ The `analyzer` CLI provides dynamic access to firmware and software security API |-------|-------------| | `analyzer` | Firmware and software image security analysis | +## Agent Integration (Claude Code) + +To use the `analyzer` CLI with Claude Code (or other AI agents), run the one-time setup: + +```bash +analyzer init-agent +``` + +This installs skills, usage context, and permissions into `~/.claude/` so that Claude Code automatically discovers the `analyzer` CLI in every project — no source code or manual configuration needed. + +What it writes: + +| File | Purpose | +|------|---------| +| `~/.claude/skills/` | Per-resource API skill files (generated from discovery documents) | +| `~/.claude/CONTEXT.md` | This file — CLI syntax, flags, and usage patterns | +| `~/.claude/CLAUDE.md` | Entry point that tells Claude Code about the tool | +| `~/.claude/settings.json` | Allowlists the `analyzer` binary for Claude Code | + +Re-run `analyzer init-agent` after upgrading to refresh skills and context. 
+ ## Rules of Engagement for Agents * **Schema First:** *If you don't know the exact JSON payload structure, run `analyzer schema ..` first to inspect the schema before executing.* diff --git a/Cargo.toml b/Cargo.toml index a6d2474..3f0697c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,6 +39,7 @@ clap_complete = "4" [dev-dependencies] assert_cmd = "2" +filetime = "0.2" predicates = "3" tempfile = "3" test-case = "3" diff --git a/src/commands/init_agent.rs b/src/commands/init_agent.rs new file mode 100644 index 0000000..2ea7dc9 --- /dev/null +++ b/src/commands/init_agent.rs @@ -0,0 +1,204 @@ +//! `analyzer init-agent` — install Claude Code agent integration globally. +//! +//! Writes skills, CONTEXT.md, CLAUDE.md, and settings.json into `~/.claude/` +//! so that Claude Code automatically discovers the `analyzer` CLI in every project. + +use std::path::Path; + +use anyhow::{Context, Result}; + +use crate::api::generate_skills; +use crate::discovery; +use crate::output; +use crate::services; + +/// Embedded copy of the project-root CONTEXT.md, baked in at compile time. +const CONTEXT_MD: &str = include_str!("../../CONTEXT.md"); + +/// Minimal CLAUDE.md that tells Claude Code about the analyzer CLI. +const CLAUDE_MD_TEMPLATE: &str = r#"# Analyzer CLI + +The `analyzer` CLI is available in this environment for firmware and container security scanning. + +- **Usage guide:** Read [CONTEXT.md](CONTEXT.md) for CLI syntax, flags, and usage patterns. +- **API skills:** See the `skills/` directory for per-resource API capabilities. +- **Schema introspection:** Run `analyzer schema ..` to discover parameters and types at runtime. + +When using the analyzer CLI, follow the rules of engagement in [CONTEXT.md](CONTEXT.md). +"#; + +const ANALYZER_PERMISSION: &str = "Bash(analyzer *)"; + +/// Run the init-agent command, writing all artifacts to `~/.claude/`. 
+pub async fn run_init_agent(discovery_flag: Option<&str>) -> Result<()> { + let claude_dir = resolve_claude_dir()?; + init_agent_to_dir(&claude_dir, discovery_flag).await +} + +/// Core logic, factored out for testing with arbitrary target directories. +pub async fn init_agent_to_dir(claude_dir: &Path, discovery_flag: Option<&str>) -> Result<()> { + std::fs::create_dir_all(claude_dir) + .with_context(|| format!("failed to create {}", claude_dir.display()))?; + + // 1. Generate skills + let skills_dir = claude_dir.join("skills"); + output::status("Skills", "generating from discovery documents..."); + for entry in services::SERVICES { + let doc = if let Some(flag) = discovery_flag { + let source = discovery::resolve_source(Some(flag))?; + discovery::load(&source).await? + } else { + discovery::load_for_service(entry).await? + }; + generate_skills::generate_for_service(&doc, entry, &skills_dir)?; + generate_skills::generate_shared(entry, &skills_dir)?; + } + + // 2. Write CONTEXT.md + let context_path = claude_dir.join("CONTEXT.md"); + std::fs::write(&context_path, CONTEXT_MD) + .with_context(|| format!("failed to write {}", context_path.display()))?; + + // 3. Write CLAUDE.md + let claude_md_path = claude_dir.join("CLAUDE.md"); + write_claude_md(&claude_md_path)?; + + // 4. Merge settings.json + let settings_path = claude_dir.join("settings.json"); + merge_settings_json(&settings_path)?; + + // Summary + output::success("Claude Code agent integration configured"); + output::status("Skills", &format!("{}", skills_dir.display())); + output::status("Context", &format!("{}", context_path.display())); + output::status("CLAUDE.md", &format!("{}", claude_md_path.display())); + output::status("Settings", &format!("{}", settings_path.display())); + + Ok(()) +} + +/// Write CLAUDE.md, preserving any existing user content above a marker. 
+fn write_claude_md(path: &Path) -> Result<()> { + std::fs::write(path, CLAUDE_MD_TEMPLATE) + .with_context(|| format!("failed to write {}", path.display())) +} + +/// Merge `Bash(analyzer *)` into `settings.json` without clobbering existing permissions. +fn merge_settings_json(path: &Path) -> Result<()> { + let mut settings: serde_json::Value = if path.exists() { + let contents = std::fs::read_to_string(path) + .with_context(|| format!("failed to read {}", path.display()))?; + serde_json::from_str(&contents).context("settings.json is not valid JSON")? + } else { + serde_json::json!({}) + }; + + let permissions = settings + .as_object_mut() + .context("settings.json root must be an object")? + .entry("permissions") + .or_insert_with(|| serde_json::json!({})); + let allow = permissions + .as_object_mut() + .context("permissions must be an object")? + .entry("allow") + .or_insert_with(|| serde_json::json!([])); + let allow_arr = allow + .as_array_mut() + .context("permissions.allow must be an array")?; + + if !allow_arr + .iter() + .any(|v| v.as_str() == Some(ANALYZER_PERMISSION)) + { + allow_arr.push(serde_json::Value::String( + ANALYZER_PERMISSION.to_string(), + )); + } + + std::fs::write(path, serde_json::to_string_pretty(&settings)?) 
+ .with_context(|| format!("failed to write {}", path.display())) +} + +fn resolve_claude_dir() -> Result { + let home = dirs::home_dir().context("could not determine home directory")?; + Ok(home.join(".claude")) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn merge_settings_creates_new_file() { + let dir = tempfile::tempdir().unwrap(); + let path = dir.path().join("settings.json"); + + merge_settings_json(&path).unwrap(); + + let contents: serde_json::Value = + serde_json::from_str(&std::fs::read_to_string(&path).unwrap()).unwrap(); + let allow = contents["permissions"]["allow"].as_array().unwrap(); + assert_eq!(allow.len(), 1); + assert_eq!(allow[0].as_str().unwrap(), ANALYZER_PERMISSION); + } + + #[test] + fn merge_settings_preserves_existing_permissions() { + let dir = tempfile::tempdir().unwrap(); + let path = dir.path().join("settings.json"); + + let existing = serde_json::json!({ + "permissions": { + "allow": ["Read", "Bash(git *)"] + } + }); + std::fs::write(&path, serde_json::to_string_pretty(&existing).unwrap()).unwrap(); + + merge_settings_json(&path).unwrap(); + + let contents: serde_json::Value = + serde_json::from_str(&std::fs::read_to_string(&path).unwrap()).unwrap(); + let allow = contents["permissions"]["allow"].as_array().unwrap(); + assert_eq!(allow.len(), 3); + assert!(allow.iter().any(|v| v.as_str() == Some("Read"))); + assert!(allow.iter().any(|v| v.as_str() == Some("Bash(git *)"))); + assert!(allow + .iter() + .any(|v| v.as_str() == Some(ANALYZER_PERMISSION))); + } + + #[test] + fn merge_settings_is_idempotent() { + let dir = tempfile::tempdir().unwrap(); + let path = dir.path().join("settings.json"); + + merge_settings_json(&path).unwrap(); + merge_settings_json(&path).unwrap(); + + let contents: serde_json::Value = + serde_json::from_str(&std::fs::read_to_string(&path).unwrap()).unwrap(); + let allow = contents["permissions"]["allow"].as_array().unwrap(); + assert_eq!(allow.len(), 1, "should not duplicate the permission 
entry"); + } + + #[test] + fn claude_md_is_written() { + let dir = tempfile::tempdir().unwrap(); + let path = dir.path().join("CLAUDE.md"); + + write_claude_md(&path).unwrap(); + + let contents = std::fs::read_to_string(&path).unwrap(); + assert!(contents.contains("CONTEXT.md")); + assert!(contents.contains("skills/")); + assert!(contents.contains("analyzer schema")); + } + + #[test] + fn context_md_is_embedded() { + // Verify the compile-time include worked + assert!(CONTEXT_MD.contains("Analyzer CLI")); + assert!(CONTEXT_MD.contains("Rules of Engagement")); + } +} diff --git a/src/commands/mod.rs b/src/commands/mod.rs index d25aed7..8c21102 100644 --- a/src/commands/mod.rs +++ b/src/commands/mod.rs @@ -2,5 +2,6 @@ pub mod auth; pub mod config; +pub mod init_agent; pub mod object; pub mod scan; diff --git a/src/discovery.rs b/src/discovery.rs index f666072..282766a 100644 --- a/src/discovery.rs +++ b/src/discovery.rs @@ -127,7 +127,16 @@ pub async fn load_for_service(entry: &ServiceEntry) -> Result let cache_dir = dirs::cache_dir() .unwrap_or_else(|| PathBuf::from(".cache")) .join("analyzer"); - std::fs::create_dir_all(&cache_dir) + load_for_service_with_cache(entry, &cache_dir).await +} + +/// Core logic for [`load_for_service`], factored out so tests can supply a +/// custom cache directory. +pub async fn load_for_service_with_cache( + entry: &ServiceEntry, + cache_dir: &std::path::Path, +) -> Result { + std::fs::create_dir_all(cache_dir) .with_context(|| format!("failed to create cache dir {}", cache_dir.display()))?; let cache_file = cache_dir.join(format!("{}_{}.json", entry.api_name, entry.version)); @@ -143,21 +152,34 @@ pub async fn load_for_service(entry: &ServiceEntry) -> Result } } - // Fetch from URL - let json = reqwest::get(entry.discovery_url) - .await - .with_context(|| format!("failed to fetch {}", entry.discovery_url))? 
- .text() - .await - .context("failed to read response body")?; - - // Write cache (best-effort) - if let Err(e) = std::fs::write(&cache_file, &json) { - eprintln!( - "warning: failed to write cache {}: {e}", - cache_file.display() - ); - } + // Fetch from URL (fall back to stale cache on failure) + let json = match reqwest::get(entry.discovery_url).await { + Ok(resp) => { + let text = resp.text().await.context("failed to read response body")?; + // Write cache (best-effort) + if let Err(e) = std::fs::write(&cache_file, &text) { + eprintln!( + "warning: failed to write cache {}: {e}", + cache_file.display() + ); + } + text + } + Err(fetch_err) => { + if cache_file.exists() { + eprintln!( + "warning: failed to fetch {} — using stale cache: {fetch_err}", + entry.discovery_url + ); + std::fs::read_to_string(&cache_file).with_context(|| { + format!("failed to read stale cache {}", cache_file.display()) + })? + } else { + return Err(fetch_err) + .with_context(|| format!("failed to fetch {}", entry.discovery_url)); + } + } + }; serde_json::from_str(&json).context("failed to parse discovery document") } diff --git a/src/main.rs b/src/main.rs index aa870f3..e2f38cc 100644 --- a/src/main.rs +++ b/src/main.rs @@ -120,6 +120,12 @@ enum Command { /// Fetches discovery documents from the service registry (or uses --discovery /// override) and writes markdown skill files to `skills/`. GenerateSkills, + + /// Install Claude Code agent integration globally. + /// + /// Generates skills, CONTEXT.md, CLAUDE.md, and settings.json in ~/.claude/ + /// so Claude Code automatically discovers the analyzer CLI in every project. 
+ InitAgent, } // -- Config subcommands ------------------------------------------------------- @@ -630,6 +636,10 @@ async fn run(cli: Cli) -> Result<()> { println!("\nDone."); Ok(()) } + + Command::InitAgent => { + commands::init_agent::run_init_agent(discovery_flag.as_deref()).await + } } } diff --git a/tests/discovery_cache.rs b/tests/discovery_cache.rs new file mode 100644 index 0000000..2e2d988 --- /dev/null +++ b/tests/discovery_cache.rs @@ -0,0 +1,103 @@ +//! Integration tests for discovery document cache fallback. +//! +//! Verifies that `load_for_service_with_cache` falls back to a stale (expired) +//! cached discovery document when the network fetch fails, and that it errors +//! correctly when no cache exists at all. + +use std::time::{Duration, SystemTime}; + +use analyzer_cli::discovery::load_for_service_with_cache; +use analyzer_cli::services::ServiceEntry; + +/// Minimal valid discovery document used as a cache fixture. +const MINIMAL_DISCOVERY: &str = r#"{ + "name": "test-api", + "version": "1.0.0", + "title": "Test API", + "rootUrl": "https://example.com/", + "servicePath": "api/", + "schemas": {}, + "resources": {} +}"#; + +/// A service entry whose discovery URL will never resolve (port 1 is +/// unreachable on localhost), so every fetch attempt fails quickly. +fn unreachable_service() -> ServiceEntry { + ServiceEntry { + aliases: &["test"], + api_name: "test-api", + version: "1.0.0", + discovery_url: "http://127.0.0.1:1/never-reachable.json", + description: "test service", + } +} + +/// Helper: write a cache file and backdate its mtime so it looks expired. +fn write_stale_cache(cache_dir: &std::path::Path, entry: &ServiceEntry) { + std::fs::create_dir_all(cache_dir).unwrap(); + let cache_file = cache_dir.join(format!("{}_{}.json", entry.api_name, entry.version)); + std::fs::write(&cache_file, MINIMAL_DISCOVERY).unwrap(); + + // Set mtime to 25 hours ago so it exceeds the 24h TTL. 
+ let stale_time = SystemTime::now() - Duration::from_secs(25 * 3600); + filetime::FileTime::from_system_time(stale_time); + let ft = filetime::FileTime::from_system_time(stale_time); + filetime::set_file_mtime(&cache_file, ft).unwrap(); +} + +mod stale_cache_fallback { + use super::*; + + #[tokio::test] + async fn uses_stale_cache_when_fetch_fails() { + let dir = tempfile::tempdir().unwrap(); + let cache_dir = dir.path().join("analyzer"); + let entry = unreachable_service(); + + write_stale_cache(&cache_dir, &entry); + + let doc = load_for_service_with_cache(&entry, &cache_dir).await; + assert!(doc.is_ok(), "should fall back to stale cache: {doc:?}"); + let doc = doc.unwrap(); + assert_eq!(doc.name, "test-api"); + assert_eq!(doc.version, "1.0.0"); + } + + #[tokio::test] + async fn errors_when_no_cache_and_fetch_fails() { + let dir = tempfile::tempdir().unwrap(); + let cache_dir = dir.path().join("analyzer"); + let entry = unreachable_service(); + + // No cache file written — should fail with fetch error. + let result = load_for_service_with_cache(&entry, &cache_dir).await; + assert!(result.is_err()); + let err_msg = format!("{:#}", result.unwrap_err()); + assert!( + err_msg.contains("failed to fetch"), + "error should mention fetch failure: {err_msg}" + ); + } +} + +mod fresh_cache { + use super::*; + + #[tokio::test] + async fn returns_fresh_cache_without_fetching() { + let dir = tempfile::tempdir().unwrap(); + let cache_dir = dir.path().join("analyzer"); + let entry = unreachable_service(); + + // Write a cache file with current mtime (fresh, within 24h TTL). + std::fs::create_dir_all(&cache_dir).unwrap(); + let cache_file = cache_dir.join(format!("{}_{}.json", entry.api_name, entry.version)); + std::fs::write(&cache_file, MINIMAL_DISCOVERY).unwrap(); + + // Even though the URL is unreachable, fresh cache should be returned + // without attempting a fetch. 
+ let doc = load_for_service_with_cache(&entry, &cache_dir).await; + assert!(doc.is_ok(), "fresh cache should be used: {doc:?}"); + assert_eq!(doc.unwrap().name, "test-api"); + } +} From 4c1a49932a8ed5e1edf4611ce6ef6b59dc6bbab0 Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Tue, 24 Mar 2026 14:46:41 +0100 Subject: [PATCH 16/38] feat(dommyrock-analyzer-cli): discovery file caching fallbacks on Net failure and non 200 statuses --- src/commands/init_agent.rs | 12 +++---- src/discovery.rs | 38 ++++++++++++++-------- src/main.rs | 4 +-- tests/discovery_cache.rs | 65 ++++++++++++++++++++++++++++++++++++-- 4 files changed, 94 insertions(+), 25 deletions(-) diff --git a/src/commands/init_agent.rs b/src/commands/init_agent.rs index 2ea7dc9..282bb70 100644 --- a/src/commands/init_agent.rs +++ b/src/commands/init_agent.rs @@ -111,9 +111,7 @@ fn merge_settings_json(path: &Path) -> Result<()> { .iter() .any(|v| v.as_str() == Some(ANALYZER_PERMISSION)) { - allow_arr.push(serde_json::Value::String( - ANALYZER_PERMISSION.to_string(), - )); + allow_arr.push(serde_json::Value::String(ANALYZER_PERMISSION.to_string())); } std::fs::write(path, serde_json::to_string_pretty(&settings)?) 
@@ -163,9 +161,11 @@ mod tests { assert_eq!(allow.len(), 3); assert!(allow.iter().any(|v| v.as_str() == Some("Read"))); assert!(allow.iter().any(|v| v.as_str() == Some("Bash(git *)"))); - assert!(allow - .iter() - .any(|v| v.as_str() == Some(ANALYZER_PERMISSION))); + assert!( + allow + .iter() + .any(|v| v.as_str() == Some(ANALYZER_PERMISSION)) + ); } #[test] diff --git a/src/discovery.rs b/src/discovery.rs index 282766a..2f513b9 100644 --- a/src/discovery.rs +++ b/src/discovery.rs @@ -110,11 +110,14 @@ pub async fn load(source: &DiscoverySource) -> Result { DiscoverySource::File(path) => std::fs::read_to_string(path) .with_context(|| format!("failed to read discovery file: {}", path.display()))?, DiscoverySource::Url(url) => { - reqwest::get(url) + let resp = reqwest::get(url) .await - .with_context(|| format!("failed to fetch discovery document from {url}"))? - .text() - .await? + .with_context(|| format!("failed to fetch discovery document from {url}"))?; + let status = resp.status(); + if !status.is_success() { + bail!("fetch {url} returned HTTP {status}"); + } + resp.text().await? 
} }; serde_json::from_str(&json_str).context("failed to parse discovery document") @@ -152,10 +155,21 @@ pub async fn load_for_service_with_cache( } } - // Fetch from URL (fall back to stale cache on failure) - let json = match reqwest::get(entry.discovery_url).await { - Ok(resp) => { - let text = resp.text().await.context("failed to read response body")?; + // Fetch from URL (fall back to stale cache on failure or non-200 response) + let fetch_result = async { + let resp = reqwest::get(entry.discovery_url) + .await + .with_context(|| format!("failed to fetch {}", entry.discovery_url))?; + let status = resp.status(); + if !status.is_success() { + bail!("fetch {} returned HTTP {status}", entry.discovery_url); + } + resp.text().await.context("failed to read response body") + } + .await; + + let json = match fetch_result { + Ok(text) => { // Write cache (best-effort) if let Err(e) = std::fs::write(&cache_file, &text) { eprintln!( @@ -167,16 +181,12 @@ pub async fn load_for_service_with_cache( } Err(fetch_err) => { if cache_file.exists() { - eprintln!( - "warning: failed to fetch {} — using stale cache: {fetch_err}", - entry.discovery_url - ); + eprintln!("warning: {fetch_err} — using stale cache",); std::fs::read_to_string(&cache_file).with_context(|| { format!("failed to read stale cache {}", cache_file.display()) })? 
} else { - return Err(fetch_err) - .with_context(|| format!("failed to fetch {}", entry.discovery_url)); + return Err(fetch_err); } } }; diff --git a/src/main.rs b/src/main.rs index e2f38cc..da271f8 100644 --- a/src/main.rs +++ b/src/main.rs @@ -637,9 +637,7 @@ async fn run(cli: Cli) -> Result<()> { Ok(()) } - Command::InitAgent => { - commands::init_agent::run_init_agent(discovery_flag.as_deref()).await - } + Command::InitAgent => commands::init_agent::run_init_agent(discovery_flag.as_deref()).await, } } diff --git a/tests/discovery_cache.rs b/tests/discovery_cache.rs index 2e2d988..94ff9be 100644 --- a/tests/discovery_cache.rs +++ b/tests/discovery_cache.rs @@ -1,13 +1,14 @@ //! Integration tests for discovery document cache fallback. //! //! Verifies that `load_for_service_with_cache` falls back to a stale (expired) -//! cached discovery document when the network fetch fails, and that it errors -//! correctly when no cache exists at all. +//! cached discovery document when the network fetch fails or returns a non-200 +//! status, and that it errors correctly when no cache exists at all. use std::time::{Duration, SystemTime}; use analyzer_cli::discovery::load_for_service_with_cache; use analyzer_cli::services::ServiceEntry; +use wiremock::{Mock, MockServer, ResponseTemplate}; /// Minimal valid discovery document used as a cache fixture. const MINIMAL_DISCOVERY: &str = r#"{ @@ -80,6 +81,66 @@ mod stale_cache_fallback { } } +mod non_200_response { + use super::*; + + /// Helper: create a service entry pointing at a wiremock server. + fn service_for_mock(url: &str) -> ServiceEntry { + // Leak the string so we get a &'static str for the ServiceEntry. 
+ let leaked: &'static str = Box::leak(url.to_string().into_boxed_str()); + ServiceEntry { + aliases: &["test"], + api_name: "test-api", + version: "1.0.0", + discovery_url: leaked, + description: "test service", + } + } + + #[tokio::test] + async fn falls_back_to_stale_cache_on_404() { + let server = MockServer::start().await; + Mock::given(wiremock::matchers::any()) + .respond_with(ResponseTemplate::new(404).set_body_string("Not Found")) + .mount(&server) + .await; + + let dir = tempfile::tempdir().unwrap(); + let cache_dir = dir.path().join("analyzer"); + let entry = service_for_mock(&format!("{}/discovery.json", server.uri())); + + write_stale_cache(&cache_dir, &entry); + + let doc = load_for_service_with_cache(&entry, &cache_dir).await; + assert!( + doc.is_ok(), + "should fall back to stale cache on 404: {doc:?}" + ); + assert_eq!(doc.unwrap().name, "test-api"); + } + + #[tokio::test] + async fn errors_on_500_without_cache() { + let server = MockServer::start().await; + Mock::given(wiremock::matchers::any()) + .respond_with(ResponseTemplate::new(500).set_body_string("Internal Server Error")) + .mount(&server) + .await; + + let dir = tempfile::tempdir().unwrap(); + let cache_dir = dir.path().join("analyzer"); + let entry = service_for_mock(&format!("{}/discovery.json", server.uri())); + + let result = load_for_service_with_cache(&entry, &cache_dir).await; + assert!(result.is_err()); + let err_msg = format!("{:#}", result.unwrap_err()); + assert!( + err_msg.contains("HTTP"), + "error should mention HTTP status: {err_msg}" + ); + } +} + mod fresh_cache { use super::*; From a89b0ee23c19a2360c04ecfdb1dec2081e361157 Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Tue, 24 Mar 2026 15:45:16 +0100 Subject: [PATCH 17/38] feat(dommyrock-analyzer-cli): rename cached discovery file and handle /api suffix for discovery service --- src/api/mod.rs | 2 +- src/config.rs | 22 ++++++++++++++++++++++ src/services.rs | 4 ++-- 3 files changed, 25 insertions(+), 3 deletions(-) 
diff --git a/src/api/mod.rs b/src/api/mod.rs index 8d3bc02..dd3176d 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -155,7 +155,7 @@ pub async fn dispatch( .await; } - let cfg = config::resolve(api_key, url, profile)?; + let cfg = config::resolve_for_discovery(api_key, url, profile)?; let client = AnalyzerClient::new(cfg.url, &cfg.api_key)?; executor::execute_method( Some(&client), diff --git a/src/config.rs b/src/config.rs index 239af9d..613e1a9 100644 --- a/src/config.rs +++ b/src/config.rs @@ -152,3 +152,25 @@ pub fn resolve( profile: profile_name, }) } + +/// Resolve config for discovery-driven commands (`api`, `schema`). +/// +/// Discovery method paths already include the `api/` segment, so the base +/// URL must not contain it. This strips a trailing `/api/` (or `/api`) from +/// whichever URL the normal resolver produces. +pub fn resolve_for_discovery( + cli_api_key: Option<&str>, + cli_url: Option<&str>, + cli_profile: Option<&str>, +) -> Result { + let mut cfg = resolve(cli_api_key, cli_url, cli_profile)?; + let url_str = cfg.url.as_str(); + let trimmed = url_str + .strip_suffix("api/") + .or_else(|| url_str.strip_suffix("api")) + .unwrap_or(url_str); + cfg.url = trimmed + .parse() + .with_context(|| format!("invalid URL: {trimmed}"))?; + Ok(cfg) +} diff --git a/src/services.rs b/src/services.rs index cbff2a9..b8226fe 100644 --- a/src/services.rs +++ b/src/services.rs @@ -13,7 +13,7 @@ pub struct ServiceEntry { pub const SERVICES: &[ServiceEntry] = &[ ServiceEntry { aliases: &["analyzer"], - api_name: "analyzer-api-routes", + api_name: "analyzer-api-discovery", version: "0.5.0", discovery_url: "https://analyzer.exein.dev/analyzer-discovery.json", description: "Firmware and software image security analysis", @@ -55,7 +55,7 @@ mod tests { #[test] fn resolve_known_service() { let entry = resolve_service("analyzer").expect("should resolve"); - assert_eq!(entry.api_name, "analyzer-api-routes"); + assert_eq!(entry.api_name, "analyzer-api-discovery"); 
assert_eq!(entry.version, "0.5.0"); } From 1d70660d7d615a027365302a6bab9c5a3af19261 Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Tue, 24 Mar 2026 16:03:50 +0100 Subject: [PATCH 18/38] feat(dommyrock-analyzer-cli): simplify analyzer-shared skill generation --- CONTEXT.md | 6 +-- skills/analyzer-shared/SKILL.md | 81 ++++++---------------------- src/api/generate_skills.rs | 95 +++++++-------------------------- src/commands/init_agent.rs | 59 ++------------------ 4 files changed, 42 insertions(+), 199 deletions(-) diff --git a/CONTEXT.md b/CONTEXT.md index 39f99e7..770b3e7 100644 --- a/CONTEXT.md +++ b/CONTEXT.md @@ -23,11 +23,11 @@ What it writes: | File | Purpose | |------|---------| | `~/.claude/skills/` | Per-resource API skill files (generated from discovery documents) | -| `~/.claude/CONTEXT.md` | This file — CLI syntax, flags, and usage patterns | -| `~/.claude/CLAUDE.md` | Entry point that tells Claude Code about the tool | | `~/.claude/settings.json` | Allowlists the `analyzer` binary for Claude Code | -Re-run `analyzer init-agent` after upgrading to refresh skills and context. +Skills are loaded on-demand by Claude Code — no global `CLAUDE.md` or `CONTEXT.md` is written, so the analyzer context only appears when relevant. + +Re-run `analyzer init-agent` after upgrading to refresh skills. ## Rules of Engagement for Agents diff --git a/skills/analyzer-shared/SKILL.md b/skills/analyzer-shared/SKILL.md index 66027e5..acf8835 100644 --- a/skills/analyzer-shared/SKILL.md +++ b/skills/analyzer-shared/SKILL.md @@ -1,6 +1,6 @@ --- name: analyzer-shared -description: "analyzer CLI: Shared patterns for authentication, global flags, and error handling." +description: "analyzer CLI: auth, introspection, and agent rules for firmware/container security scanning." 
metadata: openclaw: category: "security" @@ -8,79 +8,32 @@ metadata: bins: ["analyzer"] --- -# analyzer — Shared Reference +# analyzer CLI — Agent Rules -## Registered Services +## Discover before you act -| Alias | API | Description | -|-------|-----|-------------| -| `analyzer` | analyzer-api-routes | Firmware and software image security analysis | - -## Authentication +The CLI is self-documenting. Use these before every unfamiliar call: ```bash -# Interactive login (prompts for API key, validates, saves) -analyzer login - -# Environment variable -export ANALYZER_API_KEY="your-api-key" +analyzer api analyzer --help # browse resources +analyzer api analyzer --help # browse methods +analyzer schema analyzer.. # inspect params, types, defaults ``` -## Global Flags +## Rules -| Flag | Description | -|------|-------------| -| `--params ''` | Path and query parameters | -| `--json ''` | Request body for POST/PUT/PATCH | -| `--fields ''` | Limit response fields (context window protection) | -| `--page-all` | Auto-paginate results as NDJSON | -| `--dry-run` | Validate and print request without executing | -| `--discovery ` | Override discovery document (dev/testing) | -| `--format ` | Output format: `human` (default), `json`, `table`, `csv` | +1. **Schema first** — run `analyzer schema` before building `--params` or `--json`. +2. **Protect context** — add `--fields` to every `list` and `get` call. +3. **Dry-run mutations** — use `--dry-run` for create/update/delete, then confirm with the user before executing. +4. **Poll, don't guess** — after scheduling a scan, poll status until complete. -## CLI Syntax +## Auth ```bash -# API commands (service name is first positional arg) -analyzer api [sub-resource] [flags] - -# Schema introspection (service name is first dotted segment) -analyzer schema .. - -# Generate skills for all services -analyzer generate-skills +analyzer login # interactive, saves to ~/.config/analyzer/ +export ANALYZER_API_KEY="..." 
# or set env var ``` -## Schema Introspection - -Before calling any API method, inspect it: - -```bash -# Browse all resources for a service -analyzer schema analyzer.api - -# Inspect a specific method -analyzer schema analyzer.. - -# Browse a resource's methods -analyzer schema analyzer. -``` - -Use `analyzer schema` output to build your `--params` and `--json` flags. - -## Security Rules - -- **Always** use `--dry-run` for mutating operations (create, update, delete) before actual execution -- **Always** confirm with user before executing write/delete commands -- Prefer `--fields` to limit response size and protect the context window -- Poll scan status — do not guess when a scan completes - -## Error Handling - -All errors are JSON on stderr with a non-zero exit code: - -```json -{"error": {"code": 404, "message": "Object not found"}} -``` +## Errors -Check the exit code: `0` = success, non-zero = failure. Parse the error JSON to decide next steps. Do not retry without understanding the error. +All errors: JSON on stderr, non-zero exit. Parse before retrying. 
diff --git a/src/api/generate_skills.rs b/src/api/generate_skills.rs index 1a19968..7f75583 100644 --- a/src/api/generate_skills.rs +++ b/src/api/generate_skills.rs @@ -16,7 +16,7 @@ use std::path::Path; use anyhow::{Context, Result}; use crate::discovery::{DiscoveryDocument, DiscoveryResource}; -use crate::services::{SERVICES, ServiceEntry}; +use crate::services::ServiceEntry; struct SkillIndexEntry { name: String, @@ -288,23 +288,10 @@ fn truncate_desc(desc: &str) -> String { fn generate_shared_skill(entry: &ServiceEntry) -> String { let alias = entry.aliases[0]; - // Build the services table dynamically - let mut services_table = String::new(); - for svc in SERVICES { - writeln!( - services_table, - "| `{}` | {} | {} |", - svc.aliases.join(", "), - svc.api_name, - svc.description - ) - .unwrap(); - } - format!( r#"--- name: {alias}-shared -description: "{alias} CLI: Shared patterns for authentication, global flags, and error handling." +description: "{alias} CLI: auth, introspection, and agent rules for firmware/container security scanning." metadata: openclaw: category: "security" @@ -312,81 +299,35 @@ metadata: bins: ["analyzer"] --- -# {alias} — Shared Reference +# {alias} CLI — Agent Rules -## Registered Services +## Discover before you act -| Alias | API | Description | -|-------|-----|-------------| -{services_table} -## Authentication +The CLI is self-documenting. Use these before every unfamiliar call: ```bash -# Interactive login (prompts for API key, validates, saves) -analyzer login - -# Environment variable -export ANALYZER_API_KEY="your-api-key" +analyzer api {alias} --help # browse resources +analyzer api {alias} --help # browse methods +analyzer schema {alias}.. 
# inspect params, types, defaults ``` -## Global Flags +## Rules -| Flag | Description | -|------|-------------| -| `--params ''` | Path and query parameters | -| `--json ''` | Request body for POST/PUT/PATCH | -| `--fields ''` | Limit response fields (context window protection) | -| `--page-all` | Auto-paginate results as NDJSON | -| `--dry-run` | Validate and print request without executing | -| `--discovery ` | Override discovery document (dev/testing) | -| `--format ` | Output format: `human` (default), `json`, `table`, `csv` | +1. **Schema first** — run `analyzer schema` before building `--params` or `--json`. +2. **Protect context** — add `--fields` to every `list` and `get` call. +3. **Dry-run mutations** — use `--dry-run` for create/update/delete, then confirm with the user before executing. +4. **Poll, don't guess** — after scheduling a scan, poll status until complete. -## CLI Syntax +## Auth ```bash -# API commands (service name is first positional arg) -analyzer api [sub-resource] [flags] - -# Schema introspection (service name is first dotted segment) -analyzer schema .. - -# Generate skills for all services -analyzer generate-skills +analyzer login # interactive, saves to ~/.config/analyzer/ +export ANALYZER_API_KEY="..." # or set env var ``` -## Schema Introspection - -Before calling any API method, inspect it: - -```bash -# Browse all resources for a service -analyzer schema {alias}.api - -# Inspect a specific method -analyzer schema {alias}.. - -# Browse a resource's methods -analyzer schema {alias}. -``` - -Use `analyzer schema` output to build your `--params` and `--json` flags. 
- -## Security Rules - -- **Always** use `--dry-run` for mutating operations (create, update, delete) before actual execution -- **Always** confirm with user before executing write/delete commands -- Prefer `--fields` to limit response size and protect the context window -- Poll scan status — do not guess when a scan completes - -## Error Handling - -All errors are JSON on stderr with a non-zero exit code: - -```json -{{"error": {{"code": 404, "message": "Object not found"}}}} -``` +## Errors -Check the exit code: `0` = success, non-zero = failure. Parse the error JSON to decide next steps. Do not retry without understanding the error. +All errors: JSON on stderr, non-zero exit. Parse before retrying. "# ) } diff --git a/src/commands/init_agent.rs b/src/commands/init_agent.rs index 282bb70..0010912 100644 --- a/src/commands/init_agent.rs +++ b/src/commands/init_agent.rs @@ -1,7 +1,8 @@ //! `analyzer init-agent` — install Claude Code agent integration globally. //! -//! Writes skills, CONTEXT.md, CLAUDE.md, and settings.json into `~/.claude/` -//! so that Claude Code automatically discovers the `analyzer` CLI in every project. +//! Writes skills and settings.json into `~/.claude/` so that Claude Code +//! automatically discovers the `analyzer` CLI in every project via on-demand +//! skill loading — no global CLAUDE.md or CONTEXT.md pollution. use std::path::Path; @@ -12,21 +13,6 @@ use crate::discovery; use crate::output; use crate::services; -/// Embedded copy of the project-root CONTEXT.md, baked in at compile time. -const CONTEXT_MD: &str = include_str!("../../CONTEXT.md"); - -/// Minimal CLAUDE.md that tells Claude Code about the analyzer CLI. -const CLAUDE_MD_TEMPLATE: &str = r#"# Analyzer CLI - -The `analyzer` CLI is available in this environment for firmware and container security scanning. - -- **Usage guide:** Read [CONTEXT.md](CONTEXT.md) for CLI syntax, flags, and usage patterns. 
-- **API skills:** See the `skills/` directory for per-resource API capabilities. -- **Schema introspection:** Run `analyzer schema ..` to discover parameters and types at runtime. - -When using the analyzer CLI, follow the rules of engagement in [CONTEXT.md](CONTEXT.md). -"#; - const ANALYZER_PERMISSION: &str = "Bash(analyzer *)"; /// Run the init-agent command, writing all artifacts to `~/.claude/`. @@ -54,35 +40,18 @@ pub async fn init_agent_to_dir(claude_dir: &Path, discovery_flag: Option<&str>) generate_skills::generate_shared(entry, &skills_dir)?; } - // 2. Write CONTEXT.md - let context_path = claude_dir.join("CONTEXT.md"); - std::fs::write(&context_path, CONTEXT_MD) - .with_context(|| format!("failed to write {}", context_path.display()))?; - - // 3. Write CLAUDE.md - let claude_md_path = claude_dir.join("CLAUDE.md"); - write_claude_md(&claude_md_path)?; - - // 4. Merge settings.json + // 2. Merge settings.json let settings_path = claude_dir.join("settings.json"); merge_settings_json(&settings_path)?; // Summary output::success("Claude Code agent integration configured"); output::status("Skills", &format!("{}", skills_dir.display())); - output::status("Context", &format!("{}", context_path.display())); - output::status("CLAUDE.md", &format!("{}", claude_md_path.display())); output::status("Settings", &format!("{}", settings_path.display())); Ok(()) } -/// Write CLAUDE.md, preserving any existing user content above a marker. -fn write_claude_md(path: &Path) -> Result<()> { - std::fs::write(path, CLAUDE_MD_TEMPLATE) - .with_context(|| format!("failed to write {}", path.display())) -} - /// Merge `Bash(analyzer *)` into `settings.json` without clobbering existing permissions. 
fn merge_settings_json(path: &Path) -> Result<()> { let mut settings: serde_json::Value = if path.exists() { @@ -181,24 +150,4 @@ mod tests { let allow = contents["permissions"]["allow"].as_array().unwrap(); assert_eq!(allow.len(), 1, "should not duplicate the permission entry"); } - - #[test] - fn claude_md_is_written() { - let dir = tempfile::tempdir().unwrap(); - let path = dir.path().join("CLAUDE.md"); - - write_claude_md(&path).unwrap(); - - let contents = std::fs::read_to_string(&path).unwrap(); - assert!(contents.contains("CONTEXT.md")); - assert!(contents.contains("skills/")); - assert!(contents.contains("analyzer schema")); - } - - #[test] - fn context_md_is_embedded() { - // Verify the compile-time include worked - assert!(CONTEXT_MD.contains("Analyzer CLI")); - assert!(CONTEXT_MD.contains("Rules of Engagement")); - } } From b43da54f206c37e727e836f9e968680636889fbd Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Tue, 24 Mar 2026 16:43:51 +0100 Subject: [PATCH 19/38] feat(dommyrock-analyzer-cli): fix --format arg --- src/api/mod.rs | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/src/api/mod.rs b/src/api/mod.rs index dd3176d..0262f25 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -72,6 +72,13 @@ pub fn build_api_command(doc: &DiscoveryDocument) -> clap::Command { .default_value("100") .help("Delay between page fetches in milliseconds (default: 100)") .global(true), + ) + .arg( + clap::Arg::new("format") + .long("format") + .value_parser(clap::builder::EnumValueParser::::new()) + .help("Output format: human, json, table, csv") + .global(true), ); cmd = add_resource_subcommands(cmd, api_resource); @@ -121,6 +128,13 @@ pub async fn dispatch( let method = discovery::resolve_method(api_resource, &path_refs) .with_context(|| format!("no method found at path: {}", path.join(".")))?; + // --format from the api subcommand overrides the top-level value + let format = leaf_matches + .get_one::("format") + .or_else(|| 
matches.get_one::("format")) + .copied() + .unwrap_or(format); + let params_json = get_global_arg(leaf_matches, matches, "params"); let body_json = get_global_arg(leaf_matches, matches, "json"); let fields = get_global_arg(leaf_matches, matches, "fields"); @@ -156,7 +170,10 @@ pub async fn dispatch( } let cfg = config::resolve_for_discovery(api_key, url, profile)?; - let client = AnalyzerClient::new(cfg.url, &cfg.api_key)?; + let client = match &cfg.api_key { + Some(key) => AnalyzerClient::new(cfg.url, key)?, + None => AnalyzerClient::new_anonymous(cfg.url)?, + }; executor::execute_method( Some(&client), method, From a5f9682bd6fe927b3b21f203224f2410ca7e682f Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Tue, 24 Mar 2026 16:47:09 +0100 Subject: [PATCH 20/38] feat(dommyrock-analyzer-cli): make api-key optional for local testing purposes --- .gitignore | 3 ++- example.env | 13 +++++++++++++ src/client/mod.rs | 7 +++++++ src/config.rs | 15 ++------------- src/main.rs | 11 ++++++++++- 5 files changed, 34 insertions(+), 15 deletions(-) create mode 100644 example.env diff --git a/.gitignore b/.gitignore index e1b9cbe..d1adbdc 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ /target Cargo.lock usage_examples.txt -*-discovery.json \ No newline at end of file +*-discovery.json +.env \ No newline at end of file diff --git a/example.env b/example.env new file mode 100644 index 0000000..46bda28 --- /dev/null +++ b/example.env @@ -0,0 +1,13 @@ +# Copy to .env and adjust values for local development. +# +# The CLI reads these via --flags and env vars (see `analyzer --help`). +# Precedence: CLI flags > env vars > config file > defaults. 
+ +# Override the API base URL (default: https://analyzer.exein.io/api/) +ANALYZER_URL=http://localhost:8000/ + +# API key (or use `analyzer login` to save interactively) +# ANALYZER_API_KEY=your-api-key + +# Override the discovery document source (file path or URL) +# ANALYZER_DISCOVERY_URL=./analyzer-discovery.json diff --git a/src/client/mod.rs b/src/client/mod.rs index 18617f1..081fb66 100644 --- a/src/client/mod.rs +++ b/src/client/mod.rs @@ -44,6 +44,13 @@ impl AnalyzerClient { Ok(Self { client, base_url }) } + /// Create a client with no auth headers (for local development). + pub fn new_anonymous(base_url: Url) -> Result { + let client = Client::builder().user_agent(APP_USER_AGENT).build()?; + + Ok(Self { client, base_url }) + } + // -- Health --------------------------------------------------------------- pub async fn health(&self) -> Result { diff --git a/src/config.rs b/src/config.rs index 613e1a9..f573560 100644 --- a/src/config.rs +++ b/src/config.rs @@ -21,7 +21,7 @@ const DEFAULT_PROFILE: &str = "default"; /// Resolved runtime configuration, ready to use. 
#[derive(Debug, Clone)] pub struct ResolvedConfig { - pub api_key: String, + pub api_key: Option, pub url: Url, #[allow(dead_code)] pub profile: String, @@ -129,23 +129,12 @@ pub fn resolve( .parse() .with_context(|| format!("invalid URL: {url_str}"))?; - // API key: flag > env > profile + // API key: flag > env > profile (optional for discovery-driven commands) let api_key = cli_api_key .map(String::from) .or_else(|| std::env::var("ANALYZER_API_KEY").ok()) .or_else(|| profile.api_key.clone()); - let api_key = match api_key { - Some(key) => key, - None => anyhow::bail!( - "no API key provided\n\n\ - Set it with one of:\n \ - analyzer login\n \ - analyzer --api-key ...\n \ - export ANALYZER_API_KEY=" - ), - }; - Ok(ResolvedConfig { api_key, url, diff --git a/src/main.rs b/src/main.rs index da271f8..e2a0221 100644 --- a/src/main.rs +++ b/src/main.rs @@ -647,5 +647,14 @@ fn make_client( profile: Option<&str>, ) -> Result { let cfg = config::resolve(api_key, url, profile)?; - AnalyzerClient::new(cfg.url, &cfg.api_key) + let api_key = cfg.api_key.ok_or_else(|| { + anyhow::anyhow!( + "no API key provided\n\n\ + Set it with one of:\n \ + analyzer login\n \ + analyzer --api-key ...\n \ + export ANALYZER_API_KEY=" + ) + })?; + AnalyzerClient::new(cfg.url, &api_key) } From 3cf04c22823415d27471ab2227246b3769f87e7d Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Tue, 24 Mar 2026 17:18:55 +0100 Subject: [PATCH 21/38] feat(dommyrock-analyzer-cli): fix pagination results interpretation --- src/api/executor.rs | 53 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 52 insertions(+), 1 deletion(-) diff --git a/src/api/executor.rs b/src/api/executor.rs index 836a181..28827ed 100644 --- a/src/api/executor.rs +++ b/src/api/executor.rs @@ -265,13 +265,49 @@ fn extract_next_page( /// /// Keeps only the specified top-level keys from JSON objects. /// Arrays are filtered element-wise. Primitives pass through unchanged. 
+/// Metadata keys that wrap paginated data (not user-facing fields). +const WRAPPER_SKIP_KEYS: &[&str] = &[ + "nextPageToken", + "kind", + "total-findings", + "total_findings", + "_links", + "_embedded", + "links", +]; + fn filter_fields(value: Value, fields: &[&str]) -> Value { match value { Value::Object(map) => { let filtered: serde_json::Map<_, _> = map - .into_iter() + .iter() .filter(|(k, _)| fields.contains(&k.as_str())) + .map(|(k, v)| (k.clone(), v.clone())) .collect(); + + // If direct filtering found matches, use them. + if !filtered.is_empty() { + return Value::Object(filtered); + } + + // Otherwise, look for a paginated wrapper array (e.g. {"data": [...]}) + // and filter each element inside it. + let mut new_map = serde_json::Map::new(); + for (k, v) in &map { + if WRAPPER_SKIP_KEYS.contains(&k.as_str()) || k.starts_with('_') { + continue; + } + if let Value::Array(arr) = v { + let filtered_arr: Vec = arr + .iter() + .map(|item| filter_fields(item.clone(), fields)) + .collect(); + new_map.insert(k.clone(), Value::Array(filtered_arr)); + return Value::Object(new_map); + } + } + + // No wrapper found either — return the (empty) direct filter result. 
Value::Object(filtered) } Value::Array(arr) => { @@ -327,6 +363,21 @@ mod tests { let input = json!("hello"); assert_eq!(super::super::filter_fields(input.clone(), &["id"]), input); } + + #[test] + fn should_descend_into_paginated_wrapper() { + let input = json!({ + "data": [ + {"id": "1", "name": "obj1", "score": 72, "tags": ["a"]}, + {"id": "2", "name": "obj2", "score": 85, "tags": ["b"]} + ], + "links": {"next": "/api/objects?page=2"} + }); + assert_eq!( + super::super::filter_fields(input, &["id", "name"]), + json!({"data": [{"id": "1", "name": "obj1"}, {"id": "2", "name": "obj2"}]}) + ); + } } mod substitute_path_params { From 6227de23701097152f74e14126ac5473f39cefa3 Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Tue, 24 Mar 2026 17:19:41 +0100 Subject: [PATCH 22/38] feat(dommyrock-analyzer-cli): update skill reasoning for scan results extraction --- skills/analyzer-scans/SKILL.md | 8 ++++++++ src/api/generate_skills.rs | 17 +++++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/skills/analyzer-scans/SKILL.md b/skills/analyzer-scans/SKILL.md index 26f04d5..ffc14c6 100644 --- a/skills/analyzer-scans/SKILL.md +++ b/skills/analyzer-scans/SKILL.md @@ -82,3 +82,11 @@ analyzer schema analyzer.scans. ``` Use `analyzer schema` output to build your `--params` and `--json` flags. + +## Workflow: Fetching Analysis Results + +`analysis_id` parameters are UUIDs, not type names like "cve". To get results: + +1. Get analysis UUIDs: `analyzer api analyzer scans status list --params '{"id": "SCAN_ID"}'` +2. Find the UUID under the analysis type key (e.g., `cve.id`) +3. Use that UUID: `analyzer api analyzer scans results get --params '{"scan_id": "SCAN_ID", "analysis_id": "THE_UUID"}'` diff --git a/src/api/generate_skills.rs b/src/api/generate_skills.rs index 7f75583..1fd99be 100644 --- a/src/api/generate_skills.rs +++ b/src/api/generate_skills.rs @@ -217,6 +217,23 @@ Use `analyzer schema` output to build your `--params` and `--json` flags. 
) .unwrap(); + // Add workflow hints for resources with non-obvious ID lookup patterns + if name == "scans" { + write!( + out, + r#" +## Workflow: Fetching Analysis Results + +`analysis_id` parameters are UUIDs, not type names like "cve". To get results: + +1. Get analysis UUIDs: `analyzer api {service_alias} scans status list --params '{{"id": "SCAN_ID"}}'` +2. Find the UUID under the analysis type key (e.g., `cve.id`) +3. Use that UUID: `analyzer api {service_alias} scans results get --params '{{"scan_id": "SCAN_ID", "analysis_id": "THE_UUID"}}'` +"# + ) + .unwrap(); + } + out } From 5c75da284c4cc9afa6bb8f59b082c45b6e9a986d Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Tue, 24 Mar 2026 17:26:16 +0100 Subject: [PATCH 23/38] feat(dommyrock-analyzer-cli): fix results pagination in skills --- src/api/generate_skills.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/api/generate_skills.rs b/src/api/generate_skills.rs index 1fd99be..db40f3f 100644 --- a/src/api/generate_skills.rs +++ b/src/api/generate_skills.rs @@ -224,11 +224,17 @@ Use `analyzer schema` output to build your `--params` and `--json` flags. r#" ## Workflow: Fetching Analysis Results -`analysis_id` parameters are UUIDs, not type names like "cve". To get results: +`analysis_id` parameters are UUIDs, not type names like "cve". The `query` parameter is required. 1. Get analysis UUIDs: `analyzer api {service_alias} scans status list --params '{{"id": "SCAN_ID"}}'` 2. Find the UUID under the analysis type key (e.g., `cve.id`) -3. Use that UUID: `analyzer api {service_alias} scans results get --params '{{"scan_id": "SCAN_ID", "analysis_id": "THE_UUID"}}'` +3. Fetch results with required `query` param: + ```bash + analyzer api {service_alias} scans results get --params '{{"scan_id": "SCAN_ID", "analysis_id": "UUID", "query": "sort-by=severity&sort-ord=asc"}}' + ``` + +The `query` value is a URL-encoded filter string. Minimum required: `sort-by=severity&sort-ord=asc`. 
+Optional additions: `&page=1&per-page=25`, `&search=openssl`, `&severity-filter=critical`. "# ) .unwrap(); From b96401c179f438f8e8f6baa0554a25fef0303395 Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Tue, 24 Mar 2026 17:53:07 +0100 Subject: [PATCH 24/38] feat(dommyrock-analyzer-cli): fix skill detail generation --- skills/analyzer-health/SKILL.md | 5 +++-- skills/analyzer-objects/SKILL.md | 5 +++-- skills/analyzer-scans/SKILL.md | 15 +++++++++++---- src/api/generate_skills.rs | 20 +++++++++++++++----- 4 files changed, 32 insertions(+), 13 deletions(-) diff --git a/skills/analyzer-health/SKILL.md b/skills/analyzer-health/SKILL.md index d814fb5..179a5e5 100644 --- a/skills/analyzer-health/SKILL.md +++ b/skills/analyzer-health/SKILL.md @@ -1,7 +1,7 @@ --- name: analyzer-health version: 0.5.0 -description: "Manage health via the analyzer API — Firmware and software image security analysis" +description: "Check analyzer API service health and availability" metadata: openclaw: category: "security" @@ -12,7 +12,8 @@ metadata: # health (0.5.0) -> **PREREQUISITE:** Read `../analyzer-shared/SKILL.md` for auth, global flags, and security rules. If missing, run `analyzer generate-skills` to create it. +> **Before unfamiliar calls:** run `analyzer schema analyzer.health.` to inspect params, types, defaults. +> **Protect context:** add `--fields` to every `list`/`get`. **Dry-run mutations:** `--dry-run` first, confirm with user. 
**Auth:** `analyzer login` ```bash analyzer api analyzer health [flags] diff --git a/skills/analyzer-objects/SKILL.md b/skills/analyzer-objects/SKILL.md index 47494a1..fa55f1a 100644 --- a/skills/analyzer-objects/SKILL.md +++ b/skills/analyzer-objects/SKILL.md @@ -1,7 +1,7 @@ --- name: analyzer-objects version: 0.5.0 -description: "Manage objects via the analyzer API — Firmware and software image security analysis" +description: "List, create, update, delete objects (devices, firmware products, containers) via the analyzer CLI" metadata: openclaw: category: "security" @@ -12,7 +12,8 @@ metadata: # objects (0.5.0) -> **PREREQUISITE:** Read `../analyzer-shared/SKILL.md` for auth, global flags, and security rules. If missing, run `analyzer generate-skills` to create it. +> **Before unfamiliar calls:** run `analyzer schema analyzer.objects.` to inspect params, types, defaults. +> **Protect context:** add `--fields` to every `list`/`get`. **Dry-run mutations:** `--dry-run` first, confirm with user. **Auth:** `analyzer login` ```bash analyzer api analyzer objects [flags] diff --git a/skills/analyzer-scans/SKILL.md b/skills/analyzer-scans/SKILL.md index ffc14c6..4d39705 100644 --- a/skills/analyzer-scans/SKILL.md +++ b/skills/analyzer-scans/SKILL.md @@ -1,7 +1,7 @@ --- name: analyzer-scans version: 0.5.0 -description: "Manage scans via the analyzer API — Firmware and software image security analysis" +description: "List scans, check status, get CVE/malware/hardening results, security scores, and reports via the analyzer CLI" metadata: openclaw: category: "security" @@ -12,7 +12,8 @@ metadata: # scans (0.5.0) -> **PREREQUISITE:** Read `../analyzer-shared/SKILL.md` for auth, global flags, and security rules. If missing, run `analyzer generate-skills` to create it. +> **Before unfamiliar calls:** run `analyzer schema analyzer.scans.` to inspect params, types, defaults. +> **Protect context:** add `--fields` to every `list`/`get`. 
**Dry-run mutations:** `--dry-run` first, confirm with user. **Auth:** `analyzer login` ```bash analyzer api analyzer scans [flags] @@ -85,8 +86,14 @@ Use `analyzer schema` output to build your `--params` and `--json` flags. ## Workflow: Fetching Analysis Results -`analysis_id` parameters are UUIDs, not type names like "cve". To get results: +`analysis_id` parameters are UUIDs, not type names like "cve". The `query` parameter is required. 1. Get analysis UUIDs: `analyzer api analyzer scans status list --params '{"id": "SCAN_ID"}'` 2. Find the UUID under the analysis type key (e.g., `cve.id`) -3. Use that UUID: `analyzer api analyzer scans results get --params '{"scan_id": "SCAN_ID", "analysis_id": "THE_UUID"}'` +3. Fetch results with required `query` param: + ```bash + analyzer api analyzer scans results get --params '{"scan_id": "SCAN_ID", "analysis_id": "UUID", "query": "sort-by=severity&sort-ord=asc"}' + ``` + +The `query` value is a URL-encoded filter string. Minimum required: `sort-by=severity&sort-ord=asc`. +Optional additions: `&page=1&per-page=25`, `&search=openssl`, `&severity-filter=critical`. 
diff --git a/src/api/generate_skills.rs b/src/api/generate_skills.rs index db40f3f..2ad4e30 100644 --- a/src/api/generate_skills.rs +++ b/src/api/generate_skills.rs @@ -160,10 +160,19 @@ fn render_resource_skill( ) -> String { let skill_name = format!("{service_alias}-{name}"); - let description = format!( - "Manage {name} via the {service_alias} API — {}", - entry.description - ); + let description = match name { + "scans" => format!( + "List scans, check status, get CVE/malware/hardening results, security scores, and reports via the {service_alias} CLI" + ), + "objects" => format!( + "List, create, update, delete objects (devices, firmware products, containers) via the {service_alias} CLI" + ), + "health" => format!("Check {service_alias} API service health and availability"), + _ => format!( + "Manage {name} via the {service_alias} API — {}", + entry.description + ), + }; let mut out = format!( r#"--- @@ -180,7 +189,8 @@ metadata: # {name} ({version}) -> **PREREQUISITE:** Read `../{service_alias}-shared/SKILL.md` for auth, global flags, and security rules. If missing, run `analyzer generate-skills` to create it. +> **Before unfamiliar calls:** run `analyzer schema {service_alias}.{name}.` to inspect params, types, defaults. +> **Protect context:** add `--fields` to every `list`/`get`. **Dry-run mutations:** `--dry-run` first, confirm with user. 
**Auth:** `analyzer login` ```bash analyzer api {service_alias} {name} [flags] From 5bc652bbabdec2170fb948fb4b1b28548e03a610 Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Tue, 24 Mar 2026 18:14:51 +0100 Subject: [PATCH 25/38] feat(dommyrock-analyzer-cli): fix skill detail generation for analyzer-shared --- skills/analyzer-health/SKILL.md | 3 ++- skills/analyzer-objects/SKILL.md | 3 ++- skills/analyzer-scans/SKILL.md | 3 ++- src/api/generate_skills.rs | 3 ++- 4 files changed, 8 insertions(+), 4 deletions(-) diff --git a/skills/analyzer-health/SKILL.md b/skills/analyzer-health/SKILL.md index 179a5e5..de2544d 100644 --- a/skills/analyzer-health/SKILL.md +++ b/skills/analyzer-health/SKILL.md @@ -13,7 +13,8 @@ metadata: # health (0.5.0) > **Before unfamiliar calls:** run `analyzer schema analyzer.health.` to inspect params, types, defaults. -> **Protect context:** add `--fields` to every `list`/`get`. **Dry-run mutations:** `--dry-run` first, confirm with user. **Auth:** `analyzer login` +> **Protect context:** add `--fields` to every `list`/`get`. **Dry-run mutations:** `--dry-run` first, confirm with user. +> **Auth/config/connection errors?** Read `../analyzer-shared/SKILL.md` ```bash analyzer api analyzer health [flags] diff --git a/skills/analyzer-objects/SKILL.md b/skills/analyzer-objects/SKILL.md index fa55f1a..023692b 100644 --- a/skills/analyzer-objects/SKILL.md +++ b/skills/analyzer-objects/SKILL.md @@ -13,7 +13,8 @@ metadata: # objects (0.5.0) > **Before unfamiliar calls:** run `analyzer schema analyzer.objects.` to inspect params, types, defaults. -> **Protect context:** add `--fields` to every `list`/`get`. **Dry-run mutations:** `--dry-run` first, confirm with user. **Auth:** `analyzer login` +> **Protect context:** add `--fields` to every `list`/`get`. **Dry-run mutations:** `--dry-run` first, confirm with user. 
+> **Auth/config/connection errors?** Read `../analyzer-shared/SKILL.md` ```bash analyzer api analyzer objects [flags] diff --git a/skills/analyzer-scans/SKILL.md b/skills/analyzer-scans/SKILL.md index 4d39705..f1b6e65 100644 --- a/skills/analyzer-scans/SKILL.md +++ b/skills/analyzer-scans/SKILL.md @@ -13,7 +13,8 @@ metadata: # scans (0.5.0) > **Before unfamiliar calls:** run `analyzer schema analyzer.scans.` to inspect params, types, defaults. -> **Protect context:** add `--fields` to every `list`/`get`. **Dry-run mutations:** `--dry-run` first, confirm with user. **Auth:** `analyzer login` +> **Protect context:** add `--fields` to every `list`/`get`. **Dry-run mutations:** `--dry-run` first, confirm with user. +> **Auth/config/connection errors?** Read `../analyzer-shared/SKILL.md` ```bash analyzer api analyzer scans [flags] diff --git a/src/api/generate_skills.rs b/src/api/generate_skills.rs index 2ad4e30..508b3fb 100644 --- a/src/api/generate_skills.rs +++ b/src/api/generate_skills.rs @@ -190,7 +190,8 @@ metadata: # {name} ({version}) > **Before unfamiliar calls:** run `analyzer schema {service_alias}.{name}.` to inspect params, types, defaults. -> **Protect context:** add `--fields` to every `list`/`get`. **Dry-run mutations:** `--dry-run` first, confirm with user. **Auth:** `analyzer login` +> **Protect context:** add `--fields` to every `list`/`get`. **Dry-run mutations:** `--dry-run` first, confirm with user. 
+> **Auth/config/connection errors?** Read `../{service_alias}-shared/SKILL.md` ```bash analyzer api {service_alias} {name} [flags] From e75fadf3487bd4ba5979503322e22bad4886cac1 Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Wed, 25 Mar 2026 13:43:38 +0100 Subject: [PATCH 26/38] feat(dommyrock-analyzer-cli): fix skill detail generation and fix param double encoding --- .gitignore | 1 + src/api/executor.rs | 29 ++++++++++++++++++++++++++++- src/api/generate_skills.rs | 10 ++++++++-- 3 files changed, 37 insertions(+), 3 deletions(-) diff --git a/.gitignore b/.gitignore index d1adbdc..ab3a068 100644 --- a/.gitignore +++ b/.gitignore @@ -2,4 +2,5 @@ Cargo.lock usage_examples.txt *-discovery.json +*20*.txt .env \ No newline at end of file diff --git a/src/api/executor.rs b/src/api/executor.rs index 28827ed..b82871d 100644 --- a/src/api/executor.rs +++ b/src/api/executor.rs @@ -202,7 +202,18 @@ fn collect_query_params( Value::String(s) => s.clone(), other => other.to_string(), }; - query_params.push((name.clone(), val_str)); + // The "query" parameter is a passthrough query string + // (e.g. "sort-by=severity&sort-ord=asc"). Expand it into + // individual query params instead of sending ?query=... 
+ if name == "query" && val_str.contains('=') { + for pair in val_str.split('&') { + if let Some((k, v)) = pair.split_once('=') { + query_params.push((k.to_string(), v.to_string())); + } + } + } else { + query_params.push((name.clone(), val_str)); + } } } } @@ -477,6 +488,22 @@ mod tests { assert_eq!(result.len(), 1); assert_eq!(result[0].0, "limit"); } + + #[test] + fn should_expand_query_passthrough_param() { + let mut parameters = BTreeMap::new(); + parameters.insert("query".to_string(), param("query", true, None)); + let params = serde_json::from_str::>( + r#"{"query": "sort-by=severity&sort-ord=asc&per-page=25"}"#, + ) + .unwrap(); + + let result = super::super::collect_query_params(¶meters, ¶ms); + assert_eq!(result.len(), 3); + assert!(result.contains(&("sort-by".to_string(), "severity".to_string()))); + assert!(result.contains(&("sort-ord".to_string(), "asc".to_string()))); + assert!(result.contains(&("per-page".to_string(), "25".to_string()))); + } } mod extract_next_page { diff --git a/src/api/generate_skills.rs b/src/api/generate_skills.rs index 508b3fb..9eec0f3 100644 --- a/src/api/generate_skills.rs +++ b/src/api/generate_skills.rs @@ -244,8 +244,14 @@ Use `analyzer schema` output to build your `--params` and `--json` flags. analyzer api {service_alias} scans results get --params '{{"scan_id": "SCAN_ID", "analysis_id": "UUID", "query": "sort-by=severity&sort-ord=asc"}}' ``` -The `query` value is a URL-encoded filter string. Minimum required: `sort-by=severity&sort-ord=asc`. -Optional additions: `&page=1&per-page=25`, `&search=openssl`, `&severity-filter=critical`. +The `query` value contains filter params. 
Required fields per analysis type (all need `sort-ord=asc|desc`): +- **CVE, hardening, capabilities, password-hash:** `sort-by=severity` +- **malware:** `sort-by=filename` +- **crypto:** `sort-by=type` +- **kernel:** `sort-by=features` +- **software-bom:** `sort-by=name` + +Optional: `&page=1&per-page=25`, `&search=openssl`, `&severity-filter=critical`. "# ) .unwrap(); From d24752f9534bfaf69906b0b3dd647f19915c7740 Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Wed, 25 Mar 2026 13:45:44 +0100 Subject: [PATCH 27/38] feat(dommyrock-analyzer-cli): regenerate skills --- skills/analyzer-scans/SKILL.md | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/skills/analyzer-scans/SKILL.md b/skills/analyzer-scans/SKILL.md index f1b6e65..d2a1572 100644 --- a/skills/analyzer-scans/SKILL.md +++ b/skills/analyzer-scans/SKILL.md @@ -96,5 +96,11 @@ Use `analyzer schema` output to build your `--params` and `--json` flags. analyzer api analyzer scans results get --params '{"scan_id": "SCAN_ID", "analysis_id": "UUID", "query": "sort-by=severity&sort-ord=asc"}' ``` -The `query` value is a URL-encoded filter string. Minimum required: `sort-by=severity&sort-ord=asc`. -Optional additions: `&page=1&per-page=25`, `&search=openssl`, `&severity-filter=critical`. +The `query` value contains filter params. Required fields per analysis type (all need `sort-ord=asc|desc`): +- **CVE, hardening, capabilities, password-hash:** `sort-by=severity` +- **malware:** `sort-by=filename` +- **crypto:** `sort-by=type` +- **kernel:** `sort-by=features` +- **software-bom:** `sort-by=name` + +Optional: `&page=1&per-page=25`, `&search=openssl`, `&severity-filter=critical`. 
From f9c0ef183af05b8a57478139208536a6dbc51c6f Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Wed, 25 Mar 2026 15:03:41 +0100 Subject: [PATCH 28/38] feat(dommyrock-analyzer-cli): update skill generation for front matter descriptions --- skills/analyzer-health/SKILL.md | 2 +- skills/analyzer-objects/SKILL.md | 2 +- skills/analyzer-scans/SKILL.md | 2 +- src/api/generate_skills.rs | 92 ++++++++++++++++++++++++++------ 4 files changed, 78 insertions(+), 20 deletions(-) diff --git a/skills/analyzer-health/SKILL.md b/skills/analyzer-health/SKILL.md index de2544d..a06eb30 100644 --- a/skills/analyzer-health/SKILL.md +++ b/skills/analyzer-health/SKILL.md @@ -1,7 +1,7 @@ --- name: analyzer-health version: 0.5.0 -description: "Check analyzer API service health and availability" +description: "List health via the analyzer CLI — Firmware and software image security analysis" metadata: openclaw: category: "security" diff --git a/skills/analyzer-objects/SKILL.md b/skills/analyzer-objects/SKILL.md index 023692b..deb237c 100644 --- a/skills/analyzer-objects/SKILL.md +++ b/skills/analyzer-objects/SKILL.md @@ -1,7 +1,7 @@ --- name: analyzer-objects version: 0.5.0 -description: "List, create, update, delete objects (devices, firmware products, containers) via the analyzer CLI" +description: "Create, Delete, Get, List, Update objects; plus scans via the analyzer CLI — Firmware and software image security analysis" metadata: openclaw: category: "security" diff --git a/skills/analyzer-scans/SKILL.md b/skills/analyzer-scans/SKILL.md index d2a1572..4c6e6e1 100644 --- a/skills/analyzer-scans/SKILL.md +++ b/skills/analyzer-scans/SKILL.md @@ -1,7 +1,7 @@ --- name: analyzer-scans version: 0.5.0 -description: "List scans, check status, get CVE/malware/hardening results, security scores, and reports via the analyzer CLI" +description: "Create, Delete, Get, List scans; plus cancel, compliance-check, documents, and 7 more via the analyzer CLI — Firmware and software image security analysis" 
metadata: openclaw: category: "security" diff --git a/src/api/generate_skills.rs b/src/api/generate_skills.rs index 9eec0f3..6eaa8fa 100644 --- a/src/api/generate_skills.rs +++ b/src/api/generate_skills.rs @@ -160,19 +160,7 @@ fn render_resource_skill( ) -> String { let skill_name = format!("{service_alias}-{name}"); - let description = match name { - "scans" => format!( - "List scans, check status, get CVE/malware/hardening results, security scores, and reports via the {service_alias} CLI" - ), - "objects" => format!( - "List, create, update, delete objects (devices, firmware products, containers) via the {service_alias} CLI" - ), - "health" => format!("Check {service_alias} API service health and availability"), - _ => format!( - "Manage {name} via the {service_alias} API — {}", - entry.description - ), - }; + let description = build_resource_description(service_alias, name, resource, entry); let mut out = format!( r#"--- @@ -301,7 +289,77 @@ fn render_sub_resources(out: &mut String, resource: &DiscoveryResource) { writeln!(out).unwrap(); } -/// Truncate a description to its first sentence/line, max 120 chars. +/// Build a skill description dynamically from the discovery doc's method and +/// sub-resource names, so new services work without code changes. +/// +/// Produces strings like: +/// "Create, Delete, Get, List, Update objects; plus scans via the analyzer CLI" +fn build_resource_description( + service_alias: &str, + name: &str, + resource: &DiscoveryResource, + entry: &ServiceEntry, +) -> String { + let methods_part: String = { + let actions: Vec<_> = resource + .methods + .keys() + .map(|k| capitalize_first(k)) + .collect(); + if actions.is_empty() { + capitalize_first(name) + } else { + actions.join(", ") + } + }; + + // List sub-resources for context; cap at 3 to keep the description short. 
+ let sub_names: Vec<_> = resource.resources.keys().collect(); + let sub_part = match sub_names.len() { + 0 => String::new(), + 1..=4 => format!( + "; plus {}", + sub_names + .iter() + .map(|k| k.as_str()) + .collect::>() + .join(", ") + ), + _ => format!( + "; plus {}, and {} more", + sub_names[..3] + .iter() + .map(|k| k.as_str()) + .collect::>() + .join(", "), + sub_names.len() - 3 + ), + }; + + truncate_desc(&format!( + "{methods_part} {name}{sub_part} via the {service_alias} CLI — {}", + entry.description + )) +} + +fn capitalize_first(s: &str) -> String { + let mut c = s.chars(); + match c.next() { + None => String::new(), + Some(first) => { + let upper: String = first.to_uppercase().collect(); + format!("{upper}{}", c.as_str()) + } + } +} + +/// Truncate a description to its first sentence/line. +/// +/// Limit is 160 chars — long enough for auto-generated descriptions that +/// include method names, sub-resources, and the service tagline, while +/// still fitting comfortably in Claude Code's skill frontmatter index. +const DESCRIPTION_MAX_LEN: usize = 160; + fn truncate_desc(desc: &str) -> String { let first_line = desc.lines().next().unwrap_or(""); // Cut at first sentence end if within limit @@ -309,8 +367,8 @@ fn truncate_desc(desc: &str) -> String { .find(". 
") .map(|i| &first_line[..=i]) .unwrap_or(first_line); - if truncated.len() > 120 { - format!("{}...", &truncated[..117]) + if truncated.len() > DESCRIPTION_MAX_LEN { + format!("{}...", &truncated[..DESCRIPTION_MAX_LEN - 3]) } else { truncated.to_string() } @@ -393,7 +451,7 @@ mod tests { fn truncate_long_desc() { let long = "a".repeat(200); let result = truncate_desc(&long); - assert!(result.len() <= 120); + assert!(result.len() <= DESCRIPTION_MAX_LEN); assert!(result.ends_with("...")); } } From 51041437db12cb8dacca7323ba9cdabe8d7b106e Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Wed, 25 Mar 2026 15:46:47 +0100 Subject: [PATCH 29/38] feat(dommyrock-analyzer-cli): update cache when --discovery is run to manuly re-generate new discovery.json from latest openapi.json spec --- src/commands/init_agent.rs | 5 ++++- src/discovery.rs | 31 ++++++++++++++++++++++++++++--- 2 files changed, 32 insertions(+), 4 deletions(-) diff --git a/src/commands/init_agent.rs b/src/commands/init_agent.rs index 0010912..847da36 100644 --- a/src/commands/init_agent.rs +++ b/src/commands/init_agent.rs @@ -32,7 +32,10 @@ pub async fn init_agent_to_dir(claude_dir: &Path, discovery_flag: Option<&str>) for entry in services::SERVICES { let doc = if let Some(flag) = discovery_flag { let source = discovery::resolve_source(Some(flag))?; - discovery::load(&source).await? + let doc = discovery::load(&source).await?; + // Warm the cache so subsequent runs without --discovery use this version. + discovery::warm_cache(entry, &doc); + doc } else { discovery::load_for_service(entry).await? }; diff --git a/src/discovery.rs b/src/discovery.rs index 2f513b9..d28c581 100644 --- a/src/discovery.rs +++ b/src/discovery.rs @@ -13,7 +13,7 @@ use serde::{Deserialize, Serialize}; use crate::services::ServiceEntry; /// Top-level Discovery Document. 
-#[derive(Debug, Clone, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] #[allow(dead_code)] pub struct DiscoveryDocument { @@ -30,7 +30,7 @@ pub struct DiscoveryDocument { } /// A recursive resource node containing methods and child resources. -#[derive(Debug, Clone, Default, Deserialize)] +#[derive(Debug, Clone, Default, Serialize, Deserialize)] pub struct DiscoveryResource { #[serde(default)] pub methods: BTreeMap, @@ -39,7 +39,7 @@ pub struct DiscoveryResource { } /// A single API method. -#[derive(Debug, Clone, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] #[allow(dead_code)] pub struct DiscoveryMethod { @@ -123,6 +123,31 @@ pub async fn load(source: &DiscoverySource) -> Result { serde_json::from_str(&json_str).context("failed to parse discovery document") } +/// Write a discovery document to the service cache (best-effort). +/// +/// Called when `--discovery` provides a local file so subsequent runs +/// without the flag pick up the fresh content. +/// +/// The cache filename includes the version (`{api_name}_{version}.json`), so a version bump in +/// `services.rs` creates a new entry and the old one becomes stale. +pub fn warm_cache(entry: &ServiceEntry, doc: &DiscoveryDocument) { + let cache_dir = dirs::cache_dir() + .unwrap_or_else(|| PathBuf::from(".cache")) + .join("analyzer"); + if std::fs::create_dir_all(&cache_dir).is_err() { + return; + } + let cache_file = cache_dir.join(format!("{}_{}.json", entry.api_name, entry.version)); + if let Ok(json) = serde_json::to_string(doc) { + if let Err(e) = std::fs::write(&cache_file, json) { + eprintln!( + "warning: failed to warm cache {}: {e}", + cache_file.display() + ); + } + } +} + /// Load discovery document for a registered service with 24h file cache. 
/// /// Cache location: `~/.cache/analyzer/{api_name}_{version}.json` From b7761e2b9a3138536961218ec266820853f8fc94 Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Wed, 25 Mar 2026 16:37:50 +0100 Subject: [PATCH 30/38] feat(dommyrock-analyzer-cli): use openapi-to-discovery crate from ssh://git@github.com/exein-io/openapi-to-discovery --- .cargo/config.toml | 2 ++ Cargo.toml | 31 +++++++++++++++++++-- src/discovery.rs | 60 ++++++++++++++++++++++++++++++++++------ src/services.rs | 20 +++++--------- tests/discovery_cache.rs | 4 +-- 5 files changed, 90 insertions(+), 27 deletions(-) create mode 100644 .cargo/config.toml diff --git a/.cargo/config.toml b/.cargo/config.toml new file mode 100644 index 0000000..c91c3f3 --- /dev/null +++ b/.cargo/config.toml @@ -0,0 +1,2 @@ +[net] +git-fetch-with-cli = true diff --git a/Cargo.toml b/Cargo.toml index 3f0697c..4de5a99 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,11 +15,36 @@ name = "analyzer" path = "src/main.rs" [dependencies] -clap = { version = "4", features = ["derive", "env", "color", "help", "usage", "error-context", "suggestions", "wrap_help", "string"] } -reqwest = { version = "0.12", default-features = false, features = ["json", "multipart", "stream", "rustls-tls"] } + +# internal +openapi-to-discovery = { git = "ssh://git@github.com/exein-io/openapi-to-discovery" } + +# 3rdparty +clap = { version = "4", features = [ + "derive", + "env", + "color", + "help", + "usage", + "error-context", + "suggestions", + "wrap_help", + "string", +] } +reqwest = { version = "0.12", default-features = false, features = [ + "json", + "multipart", + "stream", + "rustls-tls", +] } serde = { version = "1", features = ["derive"] } serde_json = "1" -tokio = { version = "1", features = ["rt-multi-thread", "macros", "fs", "io-util"] } +tokio = { version = "1", features = [ + "rt-multi-thread", + "macros", + "fs", + "io-util", +] } tokio-util = { version = "0.7", features = ["io"] } tokio-stream = "0.1" futures = "0.3" diff --git 
a/src/discovery.rs b/src/discovery.rs index d28c581..28002dd 100644 --- a/src/discovery.rs +++ b/src/discovery.rs @@ -104,15 +104,18 @@ pub fn resolve_source(flag: Option<&str>) -> Result { } } -/// Load and parse the discovery document from the resolved source. +/// Load and parse a discovery document from the resolved source. +/// +/// Auto-detects whether the input is an OpenAPI spec (has `"openapi"` key) or +/// an already-converted discovery document, and converts if necessary. pub async fn load(source: &DiscoverySource) -> Result { let json_str = match source { DiscoverySource::File(path) => std::fs::read_to_string(path) - .with_context(|| format!("failed to read discovery file: {}", path.display()))?, + .with_context(|| format!("failed to read file: {}", path.display()))?, DiscoverySource::Url(url) => { let resp = reqwest::get(url) .await - .with_context(|| format!("failed to fetch discovery document from {url}"))?; + .with_context(|| format!("failed to fetch from {url}"))?; let status = resp.status(); if !status.is_success() { bail!("fetch {url} returned HTTP {status}"); @@ -120,7 +123,44 @@ pub async fn load(source: &DiscoverySource) -> Result { resp.text().await? } }; - serde_json::from_str(&json_str).context("failed to parse discovery document") + parse_or_convert(&json_str) +} + +/// Parse a JSON string as either a discovery document or an OpenAPI spec. +/// +/// If the JSON contains an `"openapi"` key, it's treated as an OpenAPI spec +/// and converted to discovery format. Otherwise it's parsed directly. 
+fn parse_or_convert(json_str: &str) -> Result { + let probe: serde_json::Value = + serde_json::from_str(json_str).context("input is not valid JSON")?; + if probe.get("openapi").is_some() { + let spec = openapi_to_discovery::parse_openapi_str(json_str) + .map_err(|e| anyhow::anyhow!("failed to parse OpenAPI spec: {e}"))?; + let doc = openapi_to_discovery::transform(&spec, None, None); + let discovery_json = + serde_json::to_string(&doc).context("failed to serialize discovery document")?; + serde_json::from_str(&discovery_json).context("failed to parse converted discovery doc") + } else { + serde_json::from_str(json_str).context("failed to parse discovery document") + } +} + +/// Convert an OpenAPI JSON string to discovery format JSON using `openapi-to-discovery`. +fn convert_openapi_to_discovery_json(openapi_json: &str, entry: &ServiceEntry) -> Result { + let spec = openapi_to_discovery::parse_openapi_str(openapi_json) + .map_err(|e| anyhow::anyhow!("failed to parse OpenAPI spec: {e}"))?; + let name_override = if entry.api_name.is_empty() { + None + } else { + Some(entry.api_name) + }; + let version_override = if entry.version.is_empty() { + None + } else { + Some(entry.version) + }; + let doc = openapi_to_discovery::transform(&spec, name_override, version_override); + serde_json::to_string(&doc).context("failed to serialize discovery document") } /// Write a discovery document to the service cache (best-effort). @@ -180,16 +220,18 @@ pub async fn load_for_service_with_cache( } } - // Fetch from URL (fall back to stale cache on failure or non-200 response) + // Fetch OpenAPI spec, convert to discovery format, cache the result. + // Falls back to stale cache on failure or non-200 response. 
let fetch_result = async { - let resp = reqwest::get(entry.discovery_url) + let resp = reqwest::get(entry.openapi_url) .await - .with_context(|| format!("failed to fetch {}", entry.discovery_url))?; + .with_context(|| format!("failed to fetch {}", entry.openapi_url))?; let status = resp.status(); if !status.is_success() { - bail!("fetch {} returned HTTP {status}", entry.discovery_url); + bail!("fetch {} returned HTTP {status}", entry.openapi_url); } - resp.text().await.context("failed to read response body") + let openapi_json = resp.text().await.context("failed to read response body")?; + convert_openapi_to_discovery_json(&openapi_json, entry) } .await; diff --git a/src/services.rs b/src/services.rs index b8226fe..e03a237 100644 --- a/src/services.rs +++ b/src/services.rs @@ -1,12 +1,13 @@ /// Compile-time service registry. /// /// Each entry maps one or more CLI aliases to an API name, version, -/// and the URL where its Discovery Document can be fetched. +/// and the URL where its OpenAPI spec can be fetched. The CLI converts +/// the OpenAPI spec to Discovery format in-process using `openapi-to-discovery`. 
pub struct ServiceEntry { pub aliases: &'static [&'static str], pub api_name: &'static str, pub version: &'static str, - pub discovery_url: &'static str, + pub openapi_url: &'static str, pub description: &'static str, } @@ -15,24 +16,17 @@ pub const SERVICES: &[ServiceEntry] = &[ aliases: &["analyzer"], api_name: "analyzer-api-discovery", version: "0.5.0", - discovery_url: "https://analyzer.exein.dev/analyzer-discovery.json", + openapi_url: "https://analyzer.exein.dev/api-doc/openapi.json", description: "Firmware and software image security analysis", }, - // Future entries (same domain, different discovery files): + // Future entries: // ServiceEntry { // aliases: &["isaac"], // api_name: "isaac-api", // version: "1.0.0", - // discovery_url: "https://analyzer.exein.dev/isaac-discovery.json", + // openapi_url: "https://analyzer.exein.dev/isaac/api-doc/openapi.json", // description: "Device identity and attestation", // }, - // ServiceEntry { - // aliases: &["vuln-tracker"], - // api_name: "vulnerability-tracker-api", - // version: "1.0.0", - // discovery_url: "https://analyzer.exein.dev/vuln-tracker-discovery.json", - // description: "Vulnerability tracking and advisory management", - // }, ]; /// Resolve a service alias to its registry entry. 
@@ -56,7 +50,7 @@ mod tests { fn resolve_known_service() { let entry = resolve_service("analyzer").expect("should resolve"); assert_eq!(entry.api_name, "analyzer-api-discovery"); - assert_eq!(entry.version, "0.5.0"); + assert!(entry.openapi_url.contains("openapi.json")); } #[test] diff --git a/tests/discovery_cache.rs b/tests/discovery_cache.rs index 94ff9be..938130c 100644 --- a/tests/discovery_cache.rs +++ b/tests/discovery_cache.rs @@ -28,7 +28,7 @@ fn unreachable_service() -> ServiceEntry { aliases: &["test"], api_name: "test-api", version: "1.0.0", - discovery_url: "http://127.0.0.1:1/never-reachable.json", + openapi_url: "http://127.0.0.1:1/never-reachable.json", description: "test service", } } @@ -92,7 +92,7 @@ mod non_200_response { aliases: &["test"], api_name: "test-api", version: "1.0.0", - discovery_url: leaked, + openapi_url: leaked, description: "test service", } } From 78737c618c2378c05123d276f14c427d2c0a3eb8 Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Wed, 25 Mar 2026 17:03:22 +0100 Subject: [PATCH 31/38] feat(dommyrock-analyzer-cli): rename mod to agent_api --- src/{api => agent_api}/executor.rs | 0 src/{api => agent_api}/generate_skills.rs | 0 src/{api => agent_api}/mod.rs | 0 src/{api => agent_api}/schema.rs | 0 src/commands/init_agent.rs | 2 +- src/lib.rs | 2 +- src/main.rs | 14 +++++++------- 7 files changed, 9 insertions(+), 9 deletions(-) rename src/{api => agent_api}/executor.rs (100%) rename src/{api => agent_api}/generate_skills.rs (100%) rename src/{api => agent_api}/mod.rs (100%) rename src/{api => agent_api}/schema.rs (100%) diff --git a/src/api/executor.rs b/src/agent_api/executor.rs similarity index 100% rename from src/api/executor.rs rename to src/agent_api/executor.rs diff --git a/src/api/generate_skills.rs b/src/agent_api/generate_skills.rs similarity index 100% rename from src/api/generate_skills.rs rename to src/agent_api/generate_skills.rs diff --git a/src/api/mod.rs b/src/agent_api/mod.rs similarity index 100% rename 
from src/api/mod.rs rename to src/agent_api/mod.rs diff --git a/src/api/schema.rs b/src/agent_api/schema.rs similarity index 100% rename from src/api/schema.rs rename to src/agent_api/schema.rs diff --git a/src/commands/init_agent.rs b/src/commands/init_agent.rs index 847da36..af5d21b 100644 --- a/src/commands/init_agent.rs +++ b/src/commands/init_agent.rs @@ -8,7 +8,7 @@ use std::path::Path; use anyhow::{Context, Result}; -use crate::api::generate_skills; +use crate::agent_api::generate_skills; use crate::discovery; use crate::output; use crate::services; diff --git a/src/lib.rs b/src/lib.rs index 188cf4c..0079495 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -2,7 +2,7 @@ //! //! The binary is in `main.rs`; this crate exposes modules needed by `tests/`. -pub mod api; +pub mod agent_api; pub mod client; pub mod config; pub mod discovery; diff --git a/src/main.rs b/src/main.rs index e2a0221..40cb01a 100644 --- a/src/main.rs +++ b/src/main.rs @@ -3,7 +3,7 @@ //! Scan firmware and container images for vulnerabilities, generate SBOMs, //! check CRA compliance, and more. -mod api; +mod agent_api; mod client; mod commands; mod config; @@ -566,7 +566,7 @@ async fn run(cli: Cli) -> Result<()> { discovery::load_for_service(entry).await? }; - let api_cmd = api::build_api_command(&doc); + let api_cmd = agent_api::build_api_command(&doc); let api_matches = match api_cmd.try_get_matches_from( std::iter::once("api".to_string()).chain(rest_args.iter().cloned()), ) { @@ -578,7 +578,7 @@ async fn run(cli: Cli) -> Result<()> { }; // Client creation is deferred — dispatch will call make_client only // if the request isn't a dry-run. - api::dispatch( + agent_api::dispatch( &doc, &api_matches, api_key.as_deref(), @@ -611,7 +611,7 @@ async fn run(cli: Cli) -> Result<()> { }; // rest_path = "scans.create" — pass with "api." 
prefix for the resource tree - api::schema::handle_schema_command(&doc, &format!("api.{rest_path}")) + agent_api::schema::handle_schema_command(&doc, &format!("api.{rest_path}")) } Command::GenerateSkills => { @@ -629,10 +629,10 @@ async fn run(cli: Cli) -> Result<()> { } else { discovery::load_for_service(entry).await? }; - api::generate_skills::generate_for_service(&doc, entry, skills_dir)?; - api::generate_skills::generate_shared(entry, skills_dir)?; + agent_api::generate_skills::generate_for_service(&doc, entry, skills_dir)?; + agent_api::generate_skills::generate_shared(entry, skills_dir)?; } - api::generate_skills::write_skills_index(skills_dir)?; + agent_api::generate_skills::write_skills_index(skills_dir)?; println!("\nDone."); Ok(()) } From 1a5dff1f7b66213df284e5dd497938a411a0fef2 Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Fri, 27 Mar 2026 14:23:27 +0100 Subject: [PATCH 32/38] feat(dommyrock-analyzer-cli): append bearer token when calling openapi endpoint --- src/discovery.rs | 22 +++++++++++++++++++++- tests/api_executor.rs | 2 +- 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/src/discovery.rs b/src/discovery.rs index 28002dd..1523612 100644 --- a/src/discovery.rs +++ b/src/discovery.rs @@ -145,6 +145,19 @@ fn parse_or_convert(json_str: &str) -> Result { } } +/// Best-effort API key resolution for authenticated OpenAPI spec fetches. +/// +/// Checks `ANALYZER_API_KEY` env var, then falls back to the default profile +/// in the config file. Returns `None` if no key is configured (the fetch +/// proceeds without auth — works for public endpoints or local dev). +fn resolve_api_key() -> Option { + std::env::var("ANALYZER_API_KEY").ok().or_else(|| { + crate::config::ConfigFile::load() + .ok() + .and_then(|cfg| cfg.profile(None).api_key.clone()) + }) +} + /// Convert an OpenAPI JSON string to discovery format JSON using `openapi-to-discovery`. 
fn convert_openapi_to_discovery_json(openapi_json: &str, entry: &ServiceEntry) -> Result { let spec = openapi_to_discovery::parse_openapi_str(openapi_json) @@ -222,8 +235,15 @@ pub async fn load_for_service_with_cache( } } // Fetch OpenAPI spec, convert to discovery format, cache the result. // Falls back to stale cache on failure or non-200 response. + // The OpenAPI endpoint may require auth, so we attach a Bearer token + // if an API key is available from config/env. let fetch_result = async { - let resp = reqwest::get(entry.openapi_url) + let mut req = reqwest::Client::new().get(entry.openapi_url); + if let Some(api_key) = resolve_api_key() { + req = req.bearer_auth(api_key); + } + let resp = req + .send() .await .with_context(|| format!("failed to fetch {}", entry.openapi_url))?; let status = resp.status(); diff --git a/tests/api_executor.rs b/tests/api_executor.rs index 67da535..88ceb7c 100644 --- a/tests/api_executor.rs +++ b/tests/api_executor.rs @@ -8,7 +8,7 @@ use serde_json::json; use wiremock::matchers::{method, path, query_param, query_param_is_missing}; use wiremock::{Mock, MockServer, ResponseTemplate}; -use analyzer_cli::api::executor::{PaginationConfig, execute_method}; +use analyzer_cli::agent_api::executor::{PaginationConfig, execute_method}; use analyzer_cli::client::AnalyzerClient; use analyzer_cli::discovery::{DiscoveryMethod, DiscoveryParameter}; use analyzer_cli::output::Format; From 6a4476e2d45e712a6279218c1a233e1e06f0c3b Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Fri, 27 Mar 2026 15:13:51 +0100 Subject: [PATCH 33/38] feat(dommyrock-analyzer-cli): env config for common envs like dev, stage, prod --- README.md | 78 ++++++++++++++++++++++++++++---------------- example.env | 11 +++++-- src/commands/auth.rs | 11 ++++++- src/config.rs | 36 ++++++++++++++++++++ src/main.rs | 20 ++++++------ 5 files changed, 113 insertions(+), 43 deletions(-) diff --git a/README.md b/README.md index 4314391..a1065c7 100644 --- a/README.md +++ b/README.md @@
-83,14 +83,22 @@ analyzer scan sbom --object a1b2c3d4-... -O sbom.json # Interactive login (prompts for API key, validates, saves) analyzer login -# Use a specific server URL -analyzer login --url https://my-analyzer.example.com/api/ +# Login to a specific environment +analyzer login --env dev # https://analyzer.exein.dev/api/ +analyzer login --env stage # https://analyzer.exein.live/api/ +analyzer login --env prod # https://analyzer.exein.io/api/ (default) + +# Or use an explicit URL (for self-hosted instances) +analyzer login --url https://my-instance.example.com/api/ # Login to a named profile -analyzer login --profile staging +analyzer login --env dev --profile dev -# Check your current identity +# Check your current identity and environment analyzer whoami +# Profile: default +# URL: https://analyzer.exein.dev/api/ (Dev) +# API Key: r4ZW...VZOC ``` ### Objects @@ -251,11 +259,13 @@ default_profile = "default" api_key = "your-api-key" url = "https://analyzer.exein.io/api/" -[profiles.staging] -api_key = "staging-key" -url = "https://staging.analyzer.exein.io/api/" +[profiles.dev] +api_key = "dev-api-key" +url = "https://analyzer.exein.dev/api/" ``` +Switch profiles with `--profile dev` or `export ANALYZER_PROFILE=dev`. + ### Precedence Settings are resolved in this order (highest priority first): @@ -277,21 +287,30 @@ Settings are resolved in this order (highest priority first): ## Agent mode -The CLI includes a discovery-driven layer designed for AI agents. While the commands above are human-friendly (named flags, progress bars, `--wait`), the agent layer exposes the full API surface dynamically from a [Discovery Document](https://developers.google.com/discovery/v1/reference/apis) — no hardcoded commands. +The CLI includes a discovery-driven layer designed for AI agents (Claude Code, etc.). While the commands above are human-friendly (named flags, progress bars, `--wait`), the agent layer exposes the full API surface dynamically — no hardcoded commands. 
+ +The CLI fetches each service's OpenAPI spec, converts it to a [Discovery Document](https://developers.google.com/discovery/v1/reference/apis) in-process (via [openapi-to-discovery](https://github.com/exein-io/openapi-to-discovery)), and uses that to generate CLI commands, schema introspection, and agent skills at runtime. + +### Setup (Claude Code) -Agents read [CONTEXT.md](CONTEXT.md) at session start for syntax, rules, and examples. +```bash +# 1. Login to your target environment +analyzer login --env dev + +# 2. Install agent integration (fetches OpenAPI, generates skills, configures permissions) +analyzer init-agent -### Setup +# 3. Start a new Claude Code session — skills are loaded automatically +``` -Point the CLI at a discovery document (local file or URL): +For local development (API running on localhost): ```bash -# Via flag -analyzer --discovery ./analyzer-discovery.json api objects list +# Point at a local OpenAPI spec (auto-detected and converted) +analyzer --discovery ./openapi.json init-agent -# Via environment variable -export ANALYZER_DISCOVERY_URL=https://analyzer.exein.dev/discovery.json -analyzer api scans list +# Or point at a pre-converted discovery file +analyzer --discovery ./analyzer-discovery.json init-agent ``` ### `analyzer api` — dynamic API access @@ -300,16 +319,14 @@ All API resources and methods are generated at runtime from the discovery docume ```bash # List objects -analyzer api objects list --params '{"limit": 10}' +analyzer api analyzer objects list --params '{"limit": 10}' --fields "id,name" # Create a scan (dry-run first) -analyzer api scans create --json '{"name": "test", ...}' --dry-run - -# Get scan results with pagination -analyzer api scans results get --params '{"scan_id": "ID", "analysis_id": "cve", "page": 1}' +analyzer api analyzer scans create --json '{"name": "test", ...}' --dry-run -# Check CRA compliance -analyzer api scans compliance-check cyber-resilience-act list --params '{"id": "SCAN_ID"}' +# Get scan 
results +analyzer api analyzer scans results get \ + --params '{"scan_id": "ID", "analysis_id": "UUID", "query": "sort-by=severity&sort-ord=asc"}' ``` **Flags for `api` commands:** @@ -318,21 +335,24 @@ analyzer api scans compliance-check cyber-resilience-act list --params '{"id": " |------|---------| | `--params ''` | Path and query parameters | | `--json ''` | Request body for POST/PUT/PATCH | +| `--fields ''` | Limit response fields (protects agent context window) | | `--dry-run` | Print the request without executing | +| `--format ` | Output: `human` (default), `json`, `table`, `csv` | +| `--page-all` | Auto-paginate results as NDJSON | ### `analyzer schema` — introspect method signatures -Dump the full method signature (HTTP method, path, parameters, request/response schemas) as JSON: +Agents use this to discover parameters, types, and request/response schemas at runtime: ```bash # Inspect a specific method -analyzer schema api.scans.create +analyzer schema analyzer.scans.create # Browse available methods under a resource -analyzer schema api.scans +analyzer schema analyzer.scans # Full tree -analyzer schema api +analyzer schema analyzer.api ``` ### `analyzer generate-skills` — generate skill files @@ -340,9 +360,9 @@ analyzer schema api Reads the discovery document and writes markdown skill files to `skills/`: ```bash -analyzer --discovery ./analyzer-discovery.json generate-skills +analyzer generate-skills ls skills/ -# analyzer-objects/ analyzer-scans/ analyzer-shared/ +# analyzer-objects/ analyzer-scans/ analyzer-health/ analyzer-shared/ ``` See [CONTEXT.md](CONTEXT.md) for the full agent reference. diff --git a/example.env b/example.env index 46bda28..9e261ac 100644 --- a/example.env +++ b/example.env @@ -2,12 +2,17 @@ # # The CLI reads these via --flags and env vars (see `analyzer --help`). # Precedence: CLI flags > env vars > config file > defaults. 
+# +# Environments (use `analyzer login --env ` to switch): +# dev = https://analyzer.exein.dev/api/ +# stage = https://analyzer.exein.live/api/ +# prod = https://analyzer.exein.io/api/ (default) -# Override the API base URL (default: https://analyzer.exein.io/api/) +# Override the API base URL for local development ANALYZER_URL=http://localhost:8000/ # API key (or use `analyzer login` to save interactively) # ANALYZER_API_KEY=your-api-key -# Override the discovery document source (file path or URL) -# ANALYZER_DISCOVERY_URL=./analyzer-discovery.json +# Override the discovery/OpenAPI source (file path or URL) +# ANALYZER_DISCOVERY_URL=./openapi.json diff --git a/src/commands/auth.rs b/src/commands/auth.rs index 32a567e..da5b8af 100644 --- a/src/commands/auth.rs +++ b/src/commands/auth.rs @@ -117,10 +117,19 @@ pub fn run_whoami(api_key: Option<&str>, url: Option<&str>, profile: Option<&str None => "(not set)".to_string(), }; + let env_label = crate::config::Environment::from_url(&resolved_url) + .map(|e| format!(" ({e:?})")) + .unwrap_or_default(); + eprintln!("{}", style("Analyzer CLI").bold().underlined()); eprintln!(); eprintln!(" {:>12} {}", style("Profile:").bold(), profile_name); - eprintln!(" {:>12} {}", style("URL:").bold(), resolved_url); + eprintln!( + " {:>12} {}{}", + style("URL:").bold(), + resolved_url, + style(env_label).dim() + ); eprintln!(" {:>12} {}", style("API Key:").bold(), masked_key); if let Ok(path) = ConfigFile::path() { diff --git a/src/config.rs b/src/config.rs index f573560..45d32bf 100644 --- a/src/config.rs +++ b/src/config.rs @@ -18,6 +18,42 @@ const CONFIG_FILE_NAME: &str = "config.toml"; const DEFAULT_URL: &str = "https://analyzer.exein.io/api/"; const DEFAULT_PROFILE: &str = "default"; +/// Named environments with pre-configured API URLs. +/// +/// Use with `analyzer login --env dev` instead of remembering full URLs. 
+#[derive(Debug, Clone, Copy, clap::ValueEnum)] +pub enum Environment { + /// Development — https://analyzer.exein.dev/api/ + Dev, + /// Staging — https://analyzer.exein.live/api/ + Stage, + /// Production — https://analyzer.exein.io/api/ + Prod, +} + +impl Environment { + pub fn url(self) -> &'static str { + match self { + Self::Dev => "https://analyzer.exein.dev/api/", + Self::Stage => "https://analyzer.exein.live/api/", + Self::Prod => "https://analyzer.exein.io/api/", + } + } + + /// Detect which environment a URL belongs to, if any. + pub fn from_url(url: &str) -> Option { + if url.contains("exein.dev") { + Some(Self::Dev) + } else if url.contains("exein.live") { + Some(Self::Stage) + } else if url.contains("exein.io") { + Some(Self::Prod) + } else { + None + } + } +} + /// Resolved runtime configuration, ready to use. #[derive(Debug, Clone)] pub struct ResolvedConfig { diff --git a/src/main.rs b/src/main.rs index 40cb01a..758f654 100644 --- a/src/main.rs +++ b/src/main.rs @@ -21,6 +21,7 @@ use uuid::Uuid; use crate::client::AnalyzerClient; use crate::client::models::{AnalysisType, ComplianceType}; +use crate::config::Environment; use crate::output::Format; /// Exein Analyzer CLI — firmware & container security scanning. @@ -66,8 +67,11 @@ enum Command { /// Authenticate and save your API key. Login { /// Server URL to authenticate against. - #[arg(long)] + #[arg(long, conflicts_with = "env")] url: Option, + /// Target environment (shortcut for --url). + #[arg(long, value_enum)] + env: Option, /// Profile name to save credentials under. #[arg(long)] profile: Option, @@ -128,8 +132,6 @@ enum Command { InitAgent, } -// -- Config subcommands ------------------------------------------------------- - #[derive(Subcommand)] enum ConfigCommand { /// Show all configuration. @@ -151,8 +153,6 @@ enum ConfigCommand { }, } -// -- Object subcommands ------------------------------------------------------- - #[derive(Subcommand)] enum ObjectCommand { /// List all objects. 
@@ -175,8 +175,6 @@ enum ObjectCommand { }, } -// -- Scan subcommands --------------------------------------------------------- - #[derive(Subcommand)] enum ScanCommand { /// Create a new scan. @@ -379,8 +377,6 @@ enum ScanCommand { }, } -// ============================================================================= - #[tokio::main] async fn main() -> ExitCode { let cli = Cli::parse(); @@ -405,8 +401,12 @@ async fn run(cli: Cli) -> Result<()> { // -- Auth (no API key required) ----------------------------------- Command::Login { url: login_url, + env: login_env, profile: login_profile, - } => commands::auth::run_login(login_url.as_deref(), login_profile.as_deref()).await, + } => { + let resolved_url = login_url.as_deref().or_else(|| login_env.map(|e| e.url())); + commands::auth::run_login(resolved_url, login_profile.as_deref()).await + } Command::Whoami => { commands::auth::run_whoami(api_key.as_deref(), url.as_deref(), profile.as_deref()) From dc4027adb605b664b2d5a60c56b59e15e5ebef58 Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Fri, 27 Mar 2026 15:30:38 +0100 Subject: [PATCH 34/38] feat(dommyrock-analyzer-cli): query param pagination skill hints --- src/agent_api/generate_skills.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/agent_api/generate_skills.rs b/src/agent_api/generate_skills.rs index 6eaa8fa..31f0d28 100644 --- a/src/agent_api/generate_skills.rs +++ b/src/agent_api/generate_skills.rs @@ -239,7 +239,7 @@ The `query` value contains filter params. Required fields per analysis type (all - **kernel:** `sort-by=features` - **software-bom:** `sort-by=name` -Optional: `&page=1&per-page=25`, `&search=openssl`, `&severity-filter=critical`. +Optional: `&page=1&per-page=25` (both required together, never use one without the other), `&search=openssl`, `&severity-filter=critical`. 
"# ) .unwrap(); From 93c770771f2ed222b569850a7a722df6c12abde2 Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Fri, 27 Mar 2026 16:02:18 +0100 Subject: [PATCH 35/38] feat(dommyrock-analyzer-cli): update ci/cd to be able to pull private repo --- .github/workflows/ci.yml | 9 +++++++++ .github/workflows/release.yml | 3 +++ .github/workflows/update-discovery.yml | 6 ++++++ 3 files changed, 18 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index de35d32..5819094 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -16,6 +16,9 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + - uses: webfactory/ssh-agent@v0.9.0 + with: + ssh-private-key: ${{ secrets.OPENAPI_TO_DISCOVERY_DEPLOY_KEY }} - uses: dtolnay/rust-toolchain@stable - uses: Swatinem/rust-cache@v2 - run: cargo check --all-targets @@ -25,6 +28,9 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + - uses: webfactory/ssh-agent@v0.9.0 + with: + ssh-private-key: ${{ secrets.OPENAPI_TO_DISCOVERY_DEPLOY_KEY }} - uses: dtolnay/rust-toolchain@stable - uses: Swatinem/rust-cache@v2 - run: cargo test @@ -44,6 +50,9 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + - uses: webfactory/ssh-agent@v0.9.0 + with: + ssh-private-key: ${{ secrets.OPENAPI_TO_DISCOVERY_DEPLOY_KEY }} - uses: dtolnay/rust-toolchain@stable with: components: clippy diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 116808f..80b3a44 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -41,6 +41,9 @@ jobs: steps: - uses: actions/checkout@v4 + - uses: webfactory/ssh-agent@v0.9.0 + with: + ssh-private-key: ${{ secrets.OPENAPI_TO_DISCOVERY_DEPLOY_KEY }} - uses: dtolnay/rust-toolchain@stable - uses: taiki-e/setup-cross-toolchain-action@v1 diff --git a/.github/workflows/update-discovery.yml b/.github/workflows/update-discovery.yml index 8091f02..20332ce 100644 --- 
a/.github/workflows/update-discovery.yml +++ b/.github/workflows/update-discovery.yml @@ -49,6 +49,12 @@ jobs: done echo "changed=$changed" >> "$GITHUB_OUTPUT" + - name: Configure SSH for private dependencies + if: steps.diff.outputs.changed == 'true' + uses: webfactory/ssh-agent@v0.9.0 + with: + ssh-private-key: ${{ secrets.OPENAPI_TO_DISCOVERY_DEPLOY_KEY }} + - name: Install Rust toolchain if: steps.diff.outputs.changed == 'true' uses: dtolnay/rust-toolchain@stable From 9fd72a689d5ad6048393ad26f6bfb7f0402d0759 Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Fri, 27 Mar 2026 16:14:19 +0100 Subject: [PATCH 36/38] feat(dommyrock-analyzer-cli): fix ci/cd build for :finding structs that were never used on main --- src/client/mod.rs | 1 + src/client/models.rs | 2 ++ 2 files changed, 3 insertions(+) diff --git a/src/client/mod.rs b/src/client/mod.rs index 081fb66..f27ab83 100644 --- a/src/client/mod.rs +++ b/src/client/mod.rs @@ -1,5 +1,6 @@ //! Typed HTTP client for the Analyzer API. +#[allow(dead_code)] pub mod models; use std::path::Path; diff --git a/src/client/models.rs b/src/client/models.rs index 6ab9531..7a84632 100644 --- a/src/client/models.rs +++ b/src/client/models.rs @@ -2,6 +2,8 @@ //! //! These types are owned by the CLI and match the API's serialization format. //! No dependency on the `analyzer-api` crate. +//! +//! Some finding types are defined ahead of their use in human-mode commands. 
use std::collections::HashMap; use std::fmt; From 263bb6a4845a5824da6fabed9398831b5c56f8cc Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Fri, 27 Mar 2026 16:19:34 +0100 Subject: [PATCH 37/38] feat(dommyrock-analyzer-cli): rm login fallback for local setup after login env fixes --- src/agent_api/mod.rs | 14 ++++++++++---- src/client/mod.rs | 6 ------ 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/agent_api/mod.rs b/src/agent_api/mod.rs index 0262f25..6b81e40 100644 --- a/src/agent_api/mod.rs +++ b/src/agent_api/mod.rs @@ -170,10 +170,16 @@ pub async fn dispatch( } let cfg = config::resolve_for_discovery(api_key, url, profile)?; - let client = match &cfg.api_key { - Some(key) => AnalyzerClient::new(cfg.url, key)?, - None => AnalyzerClient::new_anonymous(cfg.url)?, - }; + let api_key = cfg.api_key.ok_or_else(|| { + anyhow::anyhow!( + "no API key provided\n\n\ + Set it with one of:\n \ + analyzer login --env dev\n \ + analyzer --api-key ...\n \ + export ANALYZER_API_KEY=" + ) + })?; + let client = AnalyzerClient::new(cfg.url, &api_key)?; executor::execute_method( Some(&client), method, diff --git a/src/client/mod.rs b/src/client/mod.rs index f27ab83..10d1dac 100644 --- a/src/client/mod.rs +++ b/src/client/mod.rs @@ -45,12 +45,6 @@ impl AnalyzerClient { Ok(Self { client, base_url }) } - /// Create a client with no auth headers (for local development). 
- pub fn new_anonymous(base_url: Url) -> Result { - let client = Client::builder().user_agent(APP_USER_AGENT).build()?; - - Ok(Self { client, base_url }) - } // -- Health --------------------------------------------------------------- From 41c21c88d7ab1cfcf6e8bfd3dfbf7d732f6811df Mon Sep 17 00:00:00 2001 From: Dominik Polzer Date: Fri, 27 Mar 2026 16:20:07 +0100 Subject: [PATCH 38/38] feat(dommyrock-analyzer-cli): fix test --- src/client/mod.rs | 1 - src/discovery.rs | 53 ++++++++++++++++++++++++++++++++++------------- 2 files changed, 39 insertions(+), 15 deletions(-) diff --git a/src/client/mod.rs b/src/client/mod.rs index 10d1dac..d66938d 100644 --- a/src/client/mod.rs +++ b/src/client/mod.rs @@ -45,7 +45,6 @@ impl AnalyzerClient { Ok(Self { client, base_url }) } - // -- Health --------------------------------------------------------------- pub async fn health(&self) -> Result { diff --git a/src/discovery.rs b/src/discovery.rs index 1523612..100b218 100644 --- a/src/discovery.rs +++ b/src/discovery.rs @@ -314,21 +314,50 @@ pub fn resolve_resource<'a>( mod tests { use super::*; + /// Minimal discovery doc fixture for unit tests (no external file needed). 
+ const TEST_DISCOVERY: &str = r#"{ + "name": "test-api", + "version": "1.0.0", + "title": "Test", + "rootUrl": "", + "servicePath": "", + "schemas": { "Scan": { "id": "Scan" } }, + "resources": { + "api": { + "resources": { + "scans": { + "methods": { + "list": { "id": "test.scans.list", "httpMethod": "GET", "path": "api/scans" }, + "get": { "id": "test.scans.get", "httpMethod": "GET", "path": "api/scans/{id}" } + }, + "resources": { + "score": { + "methods": { + "list": { "id": "test.scans.score.list", "httpMethod": "GET", "path": "api/scans/{id}/score" } + } + } + } + } + } + } + } + }"#; + + fn test_doc() -> DiscoveryDocument { + serde_json::from_str(TEST_DISCOVERY).expect("test fixture should parse") + } + #[test] - fn loads_analyzer_discovery_json() { - let doc: DiscoveryDocument = - serde_json::from_str(&std::fs::read_to_string("analyzer-discovery.json").unwrap()) - .unwrap(); - assert_eq!(doc.name, "analyzer-api-routes"); + fn parses_discovery_document() { + let doc = test_doc(); + assert_eq!(doc.name, "test-api"); assert!(!doc.resources.is_empty()); assert!(!doc.schemas.is_empty()); } #[test] fn resolve_method_finds_nested() { - let doc: DiscoveryDocument = - serde_json::from_str(&std::fs::read_to_string("analyzer-discovery.json").unwrap()) - .unwrap(); + let doc = test_doc(); let api = doc.resources.get("api").unwrap(); let method = resolve_method(api, &["scans", "score", "list"]); assert!(method.is_some()); @@ -339,18 +368,14 @@ mod tests { #[test] fn resolve_method_returns_none_for_bad_path() { - let doc: DiscoveryDocument = - serde_json::from_str(&std::fs::read_to_string("analyzer-discovery.json").unwrap()) - .unwrap(); + let doc = test_doc(); let api = doc.resources.get("api").unwrap(); assert!(resolve_method(api, &["nonexistent", "method"]).is_none()); } #[test] fn resolve_resource_finds_intermediate() { - let doc: DiscoveryDocument = - serde_json::from_str(&std::fs::read_to_string("analyzer-discovery.json").unwrap()) - .unwrap(); + let doc = 
test_doc(); let api = doc.resources.get("api").unwrap(); let scans = resolve_resource(api, &["scans"]); assert!(scans.is_some());