diff --git a/bin/generate-sdk b/bin/generate-sdk new file mode 100755 index 0000000..9e3dab1 --- /dev/null +++ b/bin/generate-sdk @@ -0,0 +1,159 @@ +#!/usr/bin/env python3 +""" +Generate Python SDK from OpenAPI specification with post-generation patches. + +This script handles the complete SDK generation workflow: +1. Generate SDK using openapi-python-client +2. Copy generated files to robosystems_client/ +3. Apply post-generation patches (NDJSON handling, etc.) +4. Format and lint the generated code +""" + +import sys +import subprocess +import shutil +from pathlib import Path + + +def run_command(cmd: str, description: str) -> bool: + """Run a shell command and return success status.""" + print(f"📦 {description}...") + result = subprocess.run(cmd, shell=True, cwd=Path(__file__).parent.parent) + return result.returncode == 0 + + +def patch_ndjson_handling() -> bool: + """Add NDJSON handling to execute_cypher_query._parse_response.""" + + file_path = ( + Path(__file__).parent.parent + / "robosystems_client" + / "api" + / "query" + / "execute_cypher_query.py" + ) + + if not file_path.exists(): + print(f"❌ File not found: {file_path}") + return False + + # Read the current file content + content = file_path.read_text() + + # Check if patch is already applied + if "application/x-ndjson" in content: + print("✅ NDJSON patch already applied") + return True + + # Define the patch to insert (note: using 4-space indentation to match generated code) + ndjson_check = """ content_type = response.headers.get("content-type", "") + if ( + "application/x-ndjson" in content_type + or response.headers.get("x-stream-format") == "ndjson" + ): + return None +""" + + # Find the location to insert the patch (raw generated code uses 4 spaces) + search_pattern = " if response.status_code == 200:\n response_200 = ExecuteCypherQueryResponse200.from_dict(response.json())\n\n return response_200" + + if search_pattern not in content: + print(f"❌ Could not find expected pattern in {file_path}") + print("The generated code structure may have changed.") + return False + + # Replace the pattern with the patched version + replacement = f" if response.status_code == 200:\n{ndjson_check} response_200 = ExecuteCypherQueryResponse200.from_dict(response.json())\n\n return response_200" + patched_content = content.replace(search_pattern, replacement) + + # Write the patched content back + _ = file_path.write_text(patched_content) + + print(f"✅ Applied NDJSON patch to {file_path.name}") + return True + + +def main(): + """Main SDK generation workflow.""" + + # Get OpenAPI URL from command line or use default + url = sys.argv[1] if len(sys.argv) > 1 else "http://localhost:8000/openapi.json" + + print(f"🚀 Generating Python SDK from {url}...") + print() + + # Step 1: Generate SDK + if not run_command("rm -rf generated", "Cleaning previous generation"): + return 1 + + if not run_command( + f"uv run openapi-python-client generate --url {url} --output-path generated --config robosystems_client/sdk-config.yaml", + f"Generating SDK from {url}", + ): + return 1 + + # Step 2: Copy generated files + print("📦 Copying generated code to robosystems_client/...") + + base_path = Path(__file__).parent.parent + generated_path = base_path / "generated" / "robo_systems_api_client" + target_path = base_path / "robosystems_client" + + # Remove old generated files + for item in ["api", "models", "client.py", "errors.py", "types.py", "py.typed"]: + item_path = target_path / item + if item_path.exists(): + if item_path.is_dir(): + 
shutil.rmtree(item_path) + else: + item_path.unlink() + + # Copy new generated files + for item in ["api", "models", "client.py", "errors.py", "types.py", "py.typed"]: + src = generated_path / item + dst = target_path / item + if src.exists(): + if src.is_dir(): + shutil.copytree(src, dst) + else: + shutil.copy2(src, dst) + + # Clean up generated folder + shutil.rmtree(base_path / "generated") + + print() + + # Step 3: Apply patches + print("🔧 Applying post-generation patches...") + if not patch_ndjson_handling(): + print("⚠️ Warning: NDJSON patch failed, but continuing...") + + print() + + # Step 4: Format and lint + if not run_command("uv run ruff format .", "Formatting code"): + return 1 + + if not run_command("uv run ruff check . --fix", "Fixing linting issues"): + return 1 + + if not run_command("uv run ruff check .", "Running final linting check"): + return 1 + + if not run_command("uv run ruff format --check .", "Verifying formatting"): + return 1 + + print() + print("✅ SDK generation complete!") + print() + print("Changes applied:") + print(" - Generated fresh SDK from OpenAPI spec") + print(" - Applied NDJSON streaming support patch") + print(" - Formatted and linted all code") + print() + + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/justfile b/justfile index a266b95..1118a96 100644 --- a/justfile +++ b/justfile @@ -45,22 +45,7 @@ typecheck: # Generate SDK from localhost API generate-sdk url="http://localhost:8000/openapi.json": - @echo "🚀 Generating Client from {{url}}..." - rm -rf generated - uv run openapi-python-client generate --url {{url}} --output-path generated --config robosystems_client/sdk-config.yaml - @echo "📦 Copying generated code to robosystems_client..." - rm -rf robosystems_client/api robosystems_client/models robosystems_client/client.py robosystems_client/errors.py robosystems_client/types.py robosystems_client/py.typed - cp -r generated/robo_systems_api_client/api robosystems_client/ - cp -r generated/robo_systems_api_client/models robosystems_client/ - cp generated/robo_systems_api_client/client.py robosystems_client/ - cp generated/robo_systems_api_client/errors.py robosystems_client/ - cp generated/robo_systems_api_client/types.py robosystems_client/ - cp generated/robo_systems_api_client/py.typed robosystems_client/ - rm -rf generated - @just format - uv run ruff check . --fix - @just lint - @echo "✅ Client generation complete!" + bin/generate-sdk {{url}} # Build python package locally (for testing) build-package: diff --git a/robosystems_client/api/agent/auto_select_agent.py b/robosystems_client/api/agent/auto_select_agent.py index 9179630..2789748 100644 --- a/robosystems_client/api/agent/auto_select_agent.py +++ b/robosystems_client/api/agent/auto_select_agent.py @@ -85,17 +85,50 @@ def sync_detailed( client: AuthenticatedClient, body: AgentRequest, ) -> Response[Union[AgentResponse, Any, ErrorResponse, HTTPValidationError]]: - """Auto-select agent for query + r"""Auto-select agent for query Automatically select the best agent for your query. - The orchestrator will: - 1. Enrich context with RAG if enabled - 2. Evaluate all available agents - 3. Select the best match based on confidence scores - 4. Execute the query with the selected agent - - Use this endpoint when you want the system to intelligently route your query. + **Agent Selection Process:** + + The orchestrator intelligently routes your query by: + 1. Analyzing query intent and complexity + 2. Enriching context with RAG if enabled + 3. 
Evaluating all available agents against selection criteria + 4. Selecting the best match based on confidence scores + 5. Executing the query with the selected agent + + **Available Agent Types:** + - `financial`: Financial analysis, SEC filings, company metrics + - `research`: General research, data exploration, trend analysis + - `rag`: Knowledge base search using RAG enrichment + + **Execution Modes:** + - `quick`: Fast responses (~2-5s), suitable for simple queries + - `standard`: Balanced approach (~5-15s), default mode + - `extended`: Comprehensive analysis (~15-60s), deep research + - `streaming`: Real-time response streaming + + **Confidence Score Interpretation:** + - `0.9-1.0`: High confidence, agent is ideal match + - `0.7-0.9`: Good confidence, agent is suitable + - `0.5-0.7`: Moderate confidence, agent can handle but may not be optimal + - `0.3-0.5`: Low confidence, fallback agent used + - `<0.3`: Very low confidence, consider using specific agent endpoint + + **Credit Costs:** + - Quick mode: 5-10 credits per query + - Standard mode: 15-25 credits per query + - Extended mode: 30-75 credits per query + - RAG enrichment: +5-15 credits (if enabled) + + **Use Cases:** + - Ask questions without specifying agent type + - Get intelligent routing for complex multi-domain queries + - Leverage conversation history for contextual understanding + - Enable RAG for knowledge base enrichment + + See request/response examples in the \"Examples\" dropdown below. Args: graph_id (str): @@ -127,17 +160,50 @@ def sync( client: AuthenticatedClient, body: AgentRequest, ) -> Optional[Union[AgentResponse, Any, ErrorResponse, HTTPValidationError]]: - """Auto-select agent for query + r"""Auto-select agent for query Automatically select the best agent for your query. - The orchestrator will: - 1. Enrich context with RAG if enabled - 2. Evaluate all available agents - 3. Select the best match based on confidence scores - 4. Execute the query with the selected agent - - Use this endpoint when you want the system to intelligently route your query. + **Agent Selection Process:** + + The orchestrator intelligently routes your query by: + 1. Analyzing query intent and complexity + 2. Enriching context with RAG if enabled + 3. Evaluating all available agents against selection criteria + 4. Selecting the best match based on confidence scores + 5. 
Executing the query with the selected agent + + **Available Agent Types:** + - `financial`: Financial analysis, SEC filings, company metrics + - `research`: General research, data exploration, trend analysis + - `rag`: Knowledge base search using RAG enrichment + + **Execution Modes:** + - `quick`: Fast responses (~2-5s), suitable for simple queries + - `standard`: Balanced approach (~5-15s), default mode + - `extended`: Comprehensive analysis (~15-60s), deep research + - `streaming`: Real-time response streaming + + **Confidence Score Interpretation:** + - `0.9-1.0`: High confidence, agent is ideal match + - `0.7-0.9`: Good confidence, agent is suitable + - `0.5-0.7`: Moderate confidence, agent can handle but may not be optimal + - `0.3-0.5`: Low confidence, fallback agent used + - `<0.3`: Very low confidence, consider using specific agent endpoint + + **Credit Costs:** + - Quick mode: 5-10 credits per query + - Standard mode: 15-25 credits per query + - Extended mode: 30-75 credits per query + - RAG enrichment: +5-15 credits (if enabled) + + **Use Cases:** + - Ask questions without specifying agent type + - Get intelligent routing for complex multi-domain queries + - Leverage conversation history for contextual understanding + - Enable RAG for knowledge base enrichment + + See request/response examples in the \"Examples\" dropdown below. Args: graph_id (str): @@ -164,17 +230,50 @@ async def asyncio_detailed( client: AuthenticatedClient, body: AgentRequest, ) -> Response[Union[AgentResponse, Any, ErrorResponse, HTTPValidationError]]: - """Auto-select agent for query + r"""Auto-select agent for query Automatically select the best agent for your query. - The orchestrator will: - 1. Enrich context with RAG if enabled - 2. Evaluate all available agents - 3. Select the best match based on confidence scores - 4. Execute the query with the selected agent - - Use this endpoint when you want the system to intelligently route your query. + **Agent Selection Process:** + + The orchestrator intelligently routes your query by: + 1. Analyzing query intent and complexity + 2. Enriching context with RAG if enabled + 3. Evaluating all available agents against selection criteria + 4. Selecting the best match based on confidence scores + 5. 
Executing the query with the selected agent + + **Available Agent Types:** + - `financial`: Financial analysis, SEC filings, company metrics + - `research`: General research, data exploration, trend analysis + - `rag`: Knowledge base search using RAG enrichment + + **Execution Modes:** + - `quick`: Fast responses (~2-5s), suitable for simple queries + - `standard`: Balanced approach (~5-15s), default mode + - `extended`: Comprehensive analysis (~15-60s), deep research + - `streaming`: Real-time response streaming + + **Confidence Score Interpretation:** + - `0.9-1.0`: High confidence, agent is ideal match + - `0.7-0.9`: Good confidence, agent is suitable + - `0.5-0.7`: Moderate confidence, agent can handle but may not be optimal + - `0.3-0.5`: Low confidence, fallback agent used + - `<0.3`: Very low confidence, consider using specific agent endpoint + + **Credit Costs:** + - Quick mode: 5-10 credits per query + - Standard mode: 15-25 credits per query + - Extended mode: 30-75 credits per query + - RAG enrichment: +5-15 credits (if enabled) + + **Use Cases:** + - Ask questions without specifying agent type + - Get intelligent routing for complex multi-domain queries + - Leverage conversation history for contextual understanding + - Enable RAG for knowledge base enrichment + + See request/response examples in the \"Examples\" dropdown below. Args: graph_id (str): @@ -204,17 +303,50 @@ async def asyncio( client: AuthenticatedClient, body: AgentRequest, ) -> Optional[Union[AgentResponse, Any, ErrorResponse, HTTPValidationError]]: - """Auto-select agent for query + r"""Auto-select agent for query Automatically select the best agent for your query. - The orchestrator will: - 1. Enrich context with RAG if enabled - 2. Evaluate all available agents - 3. Select the best match based on confidence scores - 4. Execute the query with the selected agent - - Use this endpoint when you want the system to intelligently route your query. + **Agent Selection Process:** + + The orchestrator intelligently routes your query by: + 1. Analyzing query intent and complexity + 2. Enriching context with RAG if enabled + 3. Evaluating all available agents against selection criteria + 4. Selecting the best match based on confidence scores + 5. 
Executing the query with the selected agent + + **Available Agent Types:** + - `financial`: Financial analysis, SEC filings, company metrics + - `research`: General research, data exploration, trend analysis + - `rag`: Knowledge base search using RAG enrichment + + **Execution Modes:** + - `quick`: Fast responses (~2-5s), suitable for simple queries + - `standard`: Balanced approach (~5-15s), default mode + - `extended`: Comprehensive analysis (~15-60s), deep research + - `streaming`: Real-time response streaming + + **Confidence Score Interpretation:** + - `0.9-1.0`: High confidence, agent is ideal match + - `0.7-0.9`: Good confidence, agent is suitable + - `0.5-0.7`: Moderate confidence, agent can handle but may not be optimal + - `0.3-0.5`: Low confidence, fallback agent used + - `<0.3`: Very low confidence, consider using specific agent endpoint + + **Credit Costs:** + - Quick mode: 5-10 credits per query + - Standard mode: 15-25 credits per query + - Extended mode: 30-75 credits per query + - RAG enrichment: +5-15 credits (if enabled) + + **Use Cases:** + - Ask questions without specifying agent type + - Get intelligent routing for complex multi-domain queries + - Leverage conversation history for contextual understanding + - Enable RAG for knowledge base enrichment + + See request/response examples in the \"Examples\" dropdown below. Args: graph_id (str): diff --git a/robosystems_client/api/backup/create_backup.py b/robosystems_client/api/backup/create_backup.py index 54844b7..b514a26 100644 --- a/robosystems_client/api/backup/create_backup.py +++ b/robosystems_client/api/backup/create_backup.py @@ -102,6 +102,23 @@ def sync_detailed( - **Download Support**: Unencrypted backups can be downloaded - **Restore Support**: Future support for encrypted backup restoration + **Operation State Machine:** + ``` + pending → processing → completed + ↘ failed + ``` + - **pending**: Backup queued, waiting to start + - **processing**: Actively backing up database + - **completed**: Backup successfully created and stored + - **failed**: Backup failed (check error message) + + **Expected Durations:** + Operation times vary by database size: + - **Small** (<1GB): 30 seconds - 2 minutes + - **Medium** (1-10GB): 2-10 minutes + - **Large** (10-100GB): 10-30 minutes + - **Very Large** (>100GB): 30+ minutes + **Progress Monitoring:** Use the returned operation_id to connect to the SSE stream: ```javascript @@ -109,6 +126,7 @@ def sync_detailed( eventSource.addEventListener('operation_progress', (event) => { const data = JSON.parse(event.data); console.log('Backup progress:', data.progress_percent + '%'); + console.log('Status:', data.status); // pending, processing, completed, failed }); ``` @@ -177,6 +195,23 @@ def sync( - **Download Support**: Unencrypted backups can be downloaded - **Restore Support**: Future support for encrypted backup restoration + **Operation State Machine:** + ``` + pending → processing → completed + ↘ failed + ``` + - **pending**: Backup queued, waiting to start + - **processing**: Actively backing up database + - **completed**: Backup successfully created and stored + - **failed**: Backup failed (check error message) + + **Expected Durations:** + Operation times vary by database size: + - **Small** (<1GB): 30 seconds - 2 minutes + - **Medium** (1-10GB): 2-10 minutes + - **Large** (10-100GB): 10-30 minutes + - **Very Large** (>100GB): 30+ minutes + **Progress Monitoring:** Use the returned operation_id to connect to the SSE stream: ```javascript @@ -184,6 +219,7 @@ def sync( 
eventSource.addEventListener('operation_progress', (event) => { const data = JSON.parse(event.data); console.log('Backup progress:', data.progress_percent + '%'); + console.log('Status:', data.status); // pending, processing, completed, failed }); ``` @@ -247,6 +283,23 @@ async def asyncio_detailed( - **Download Support**: Unencrypted backups can be downloaded - **Restore Support**: Future support for encrypted backup restoration + **Operation State Machine:** + ``` + pending → processing → completed + ↘ failed + ``` + - **pending**: Backup queued, waiting to start + - **processing**: Actively backing up database + - **completed**: Backup successfully created and stored + - **failed**: Backup failed (check error message) + + **Expected Durations:** + Operation times vary by database size: + - **Small** (<1GB): 30 seconds - 2 minutes + - **Medium** (1-10GB): 2-10 minutes + - **Large** (10-100GB): 10-30 minutes + - **Very Large** (>100GB): 30+ minutes + **Progress Monitoring:** Use the returned operation_id to connect to the SSE stream: ```javascript @@ -254,6 +307,7 @@ async def asyncio_detailed( eventSource.addEventListener('operation_progress', (event) => { const data = JSON.parse(event.data); console.log('Backup progress:', data.progress_percent + '%'); + console.log('Status:', data.status); // pending, processing, completed, failed }); ``` @@ -320,6 +374,23 @@ async def asyncio( - **Download Support**: Unencrypted backups can be downloaded - **Restore Support**: Future support for encrypted backup restoration + **Operation State Machine:** + ``` + pending → processing → completed + ↘ failed + ``` + - **pending**: Backup queued, waiting to start + - **processing**: Actively backing up database + - **completed**: Backup successfully created and stored + - **failed**: Backup failed (check error message) + + **Expected Durations:** + Operation times vary by database size: + - **Small** (<1GB): 30 seconds - 2 minutes + - **Medium** (1-10GB): 2-10 minutes + - **Large** (10-100GB): 10-30 minutes + - **Very Large** (>100GB): 30+ minutes + **Progress Monitoring:** Use the returned operation_id to connect to the SSE stream: ```javascript @@ -327,6 +398,7 @@ async def asyncio( eventSource.addEventListener('operation_progress', (event) => { const data = JSON.parse(event.data); console.log('Backup progress:', data.progress_percent + '%'); + console.log('Status:', data.status); // pending, processing, completed, failed }); ``` diff --git a/robosystems_client/api/backup/get_backup_download_url.py b/robosystems_client/api/backup/get_backup_download_url.py index f084b42..9793968 100644 --- a/robosystems_client/api/backup/get_backup_download_url.py +++ b/robosystems_client/api/backup/get_backup_download_url.py @@ -5,9 +5,7 @@ from ... 
import errors from ...client import AuthenticatedClient, Client -from ...models.get_backup_download_url_response_getbackupdownloadurl import ( - GetBackupDownloadUrlResponseGetbackupdownloadurl, -) +from ...models.backup_download_url_response import BackupDownloadUrlResponse from ...models.http_validation_error import HTTPValidationError from ...types import UNSET, Response, Unset @@ -35,13 +33,9 @@ def _get_kwargs( def _parse_response( *, client: Union[AuthenticatedClient, Client], response: httpx.Response -) -> Optional[ - Union[Any, GetBackupDownloadUrlResponseGetbackupdownloadurl, HTTPValidationError] -]: +) -> Optional[Union[Any, BackupDownloadUrlResponse, HTTPValidationError]]: if response.status_code == 200: - response_200 = GetBackupDownloadUrlResponseGetbackupdownloadurl.from_dict( - response.json() - ) + response_200 = BackupDownloadUrlResponse.from_dict(response.json()) return response_200 @@ -70,9 +64,7 @@ def _parse_response( def _build_response( *, client: Union[AuthenticatedClient, Client], response: httpx.Response -) -> Response[ - Union[Any, GetBackupDownloadUrlResponseGetbackupdownloadurl, HTTPValidationError] -]: +) -> Response[Union[Any, BackupDownloadUrlResponse, HTTPValidationError]]: return Response( status_code=HTTPStatus(response.status_code), content=response.content, @@ -87,9 +79,7 @@ def sync_detailed( *, client: AuthenticatedClient, expires_in: Union[Unset, int] = 3600, -) -> Response[ - Union[Any, GetBackupDownloadUrlResponseGetbackupdownloadurl, HTTPValidationError] -]: +) -> Response[Union[Any, BackupDownloadUrlResponse, HTTPValidationError]]: """Get temporary download URL for backup Generate a temporary download URL for a backup (unencrypted, compressed .kuzu files only) @@ -104,7 +94,7 @@ def sync_detailed( httpx.TimeoutException: If the request takes longer than Client.timeout. Returns: - Response[Union[Any, GetBackupDownloadUrlResponseGetbackupdownloadurl, HTTPValidationError]] + Response[Union[Any, BackupDownloadUrlResponse, HTTPValidationError]] """ kwargs = _get_kwargs( @@ -126,9 +116,7 @@ def sync( *, client: AuthenticatedClient, expires_in: Union[Unset, int] = 3600, -) -> Optional[ - Union[Any, GetBackupDownloadUrlResponseGetbackupdownloadurl, HTTPValidationError] -]: +) -> Optional[Union[Any, BackupDownloadUrlResponse, HTTPValidationError]]: """Get temporary download URL for backup Generate a temporary download URL for a backup (unencrypted, compressed .kuzu files only) @@ -143,7 +131,7 @@ def sync( httpx.TimeoutException: If the request takes longer than Client.timeout. Returns: - Union[Any, GetBackupDownloadUrlResponseGetbackupdownloadurl, HTTPValidationError] + Union[Any, BackupDownloadUrlResponse, HTTPValidationError] """ return sync_detailed( @@ -160,9 +148,7 @@ async def asyncio_detailed( *, client: AuthenticatedClient, expires_in: Union[Unset, int] = 3600, -) -> Response[ - Union[Any, GetBackupDownloadUrlResponseGetbackupdownloadurl, HTTPValidationError] -]: +) -> Response[Union[Any, BackupDownloadUrlResponse, HTTPValidationError]]: """Get temporary download URL for backup Generate a temporary download URL for a backup (unencrypted, compressed .kuzu files only) @@ -177,7 +163,7 @@ async def asyncio_detailed( httpx.TimeoutException: If the request takes longer than Client.timeout. 
Returns: - Response[Union[Any, GetBackupDownloadUrlResponseGetbackupdownloadurl, HTTPValidationError]] + Response[Union[Any, BackupDownloadUrlResponse, HTTPValidationError]] """ kwargs = _get_kwargs( @@ -197,9 +183,7 @@ async def asyncio( *, client: AuthenticatedClient, expires_in: Union[Unset, int] = 3600, -) -> Optional[ - Union[Any, GetBackupDownloadUrlResponseGetbackupdownloadurl, HTTPValidationError] -]: +) -> Optional[Union[Any, BackupDownloadUrlResponse, HTTPValidationError]]: """Get temporary download URL for backup Generate a temporary download URL for a backup (unencrypted, compressed .kuzu files only) @@ -214,7 +198,7 @@ async def asyncio( httpx.TimeoutException: If the request takes longer than Client.timeout. Returns: - Union[Any, GetBackupDownloadUrlResponseGetbackupdownloadurl, HTTPValidationError] + Union[Any, BackupDownloadUrlResponse, HTTPValidationError] """ return ( diff --git a/robosystems_client/api/backup/restore_backup.py b/robosystems_client/api/backup/restore_backup.py index bd38adc..872c3bc 100644 --- a/robosystems_client/api/backup/restore_backup.py +++ b/robosystems_client/api/backup/restore_backup.py @@ -104,6 +104,28 @@ def sync_detailed( - **Data Integrity**: Verification ensures successful restore - **Security**: Only encrypted backups to prevent data tampering + **Operation State Machine:** + ``` + pending → backing_up_current → downloading → restoring → verifying → completed + ↘ failed + ``` + - **pending**: Restore queued, waiting to start + - **backing_up_current**: Creating safety backup of existing database + - **downloading**: Downloading backup from storage + - **restoring**: Replacing database with backup contents + - **verifying**: Verifying database integrity (if enabled) + - **completed**: Restore successful, database operational + - **failed**: Restore failed (rollback may be available) + + **Expected Durations:** + Operation times vary by database size (includes backup + restore): + - **Small** (<1GB): 1-3 minutes + - **Medium** (1-10GB): 5-15 minutes + - **Large** (10-100GB): 20-45 minutes + - **Very Large** (>100GB): 45+ minutes + + Note: Restore operations take longer than backups due to safety backup step. 
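All of these long-running endpoints share one SSE contract, so a Python counterpart to the JavaScript snippet in the Progress Monitoring section below may be useful. This is a minimal sketch, not an SDK helper: it assumes the stream is served at `/v1/operations/{operation_id}/stream` (the full URL is elided from this diff) and that authentication rides in an `X-API-Key` header; the `operation_progress` event name, the `status`/`message` fields, and the terminal states are the ones documented above.

```python
import json

import httpx


def watch_operation(base_url: str, api_key: str, operation_id: str) -> None:
    """Print progress events until the operation reaches a terminal state."""
    url = f"{base_url}/v1/operations/{operation_id}/stream"  # assumed path
    headers = {"X-API-Key": api_key, "Accept": "text/event-stream"}  # assumed auth header
    with httpx.stream("GET", url, headers=headers, timeout=None) as response:
        event_name = None
        for line in response.iter_lines():
            if line.startswith("event:"):
                event_name = line.split(":", 1)[1].strip()
            elif line.startswith("data:") and event_name == "operation_progress":
                data = json.loads(line.split(":", 1)[1])
                print(f"{data.get('status')}: {data.get('message', '')}")
                if data.get("status") in ("completed", "failed"):
                    return
```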
+ **Progress Monitoring:** Use the returned operation_id to connect to the SSE stream: ```javascript @@ -111,6 +133,7 @@ def sync_detailed( eventSource.addEventListener('operation_progress', (event) => { const data = JSON.parse(event.data); console.log('Restore progress:', data.message); + console.log('Status:', data.status); // Shows current state }); ``` @@ -182,6 +205,28 @@ def sync( - **Data Integrity**: Verification ensures successful restore - **Security**: Only encrypted backups to prevent data tampering + **Operation State Machine:** + ``` + pending → backing_up_current → downloading → restoring → verifying → completed + ↘ failed + ``` + - **pending**: Restore queued, waiting to start + - **backing_up_current**: Creating safety backup of existing database + - **downloading**: Downloading backup from storage + - **restoring**: Replacing database with backup contents + - **verifying**: Verifying database integrity (if enabled) + - **completed**: Restore successful, database operational + - **failed**: Restore failed (rollback may be available) + + **Expected Durations:** + Operation times vary by database size (includes backup + restore): + - **Small** (<1GB): 1-3 minutes + - **Medium** (1-10GB): 5-15 minutes + - **Large** (10-100GB): 20-45 minutes + - **Very Large** (>100GB): 45+ minutes + + Note: Restore operations take longer than backups due to safety backup step. + **Progress Monitoring:** Use the returned operation_id to connect to the SSE stream: ```javascript @@ -189,6 +234,7 @@ def sync( eventSource.addEventListener('operation_progress', (event) => { const data = JSON.parse(event.data); console.log('Restore progress:', data.message); + console.log('Status:', data.status); // Shows current state }); ``` @@ -255,6 +301,28 @@ async def asyncio_detailed( - **Data Integrity**: Verification ensures successful restore - **Security**: Only encrypted backups to prevent data tampering + **Operation State Machine:** + ``` + pending → backing_up_current → downloading → restoring → verifying → completed + ↘ failed + ``` + - **pending**: Restore queued, waiting to start + - **backing_up_current**: Creating safety backup of existing database + - **downloading**: Downloading backup from storage + - **restoring**: Replacing database with backup contents + - **verifying**: Verifying database integrity (if enabled) + - **completed**: Restore successful, database operational + - **failed**: Restore failed (rollback may be available) + + **Expected Durations:** + Operation times vary by database size (includes backup + restore): + - **Small** (<1GB): 1-3 minutes + - **Medium** (1-10GB): 5-15 minutes + - **Large** (10-100GB): 20-45 minutes + - **Very Large** (>100GB): 45+ minutes + + Note: Restore operations take longer than backups due to safety backup step. 
+ **Progress Monitoring:** Use the returned operation_id to connect to the SSE stream: ```javascript @@ -262,6 +330,7 @@ async def asyncio_detailed( eventSource.addEventListener('operation_progress', (event) => { const data = JSON.parse(event.data); console.log('Restore progress:', data.message); + console.log('Status:', data.status); // Shows current state }); ``` @@ -331,6 +400,28 @@ async def asyncio( - **Data Integrity**: Verification ensures successful restore - **Security**: Only encrypted backups to prevent data tampering + **Operation State Machine:** + ``` + pending → backing_up_current → downloading → restoring → verifying → completed + ↘ failed + ``` + - **pending**: Restore queued, waiting to start + - **backing_up_current**: Creating safety backup of existing database + - **downloading**: Downloading backup from storage + - **restoring**: Replacing database with backup contents + - **verifying**: Verifying database integrity (if enabled) + - **completed**: Restore successful, database operational + - **failed**: Restore failed (rollback may be available) + + **Expected Durations:** + Operation times vary by database size (includes backup + restore): + - **Small** (<1GB): 1-3 minutes + - **Medium** (1-10GB): 5-15 minutes + - **Large** (10-100GB): 20-45 minutes + - **Very Large** (>100GB): 45+ minutes + + Note: Restore operations take longer than backups due to safety backup step. + **Progress Monitoring:** Use the returned operation_id to connect to the SSE stream: ```javascript @@ -338,6 +429,7 @@ async def asyncio( eventSource.addEventListener('operation_progress', (event) => { const data = JSON.parse(event.data); console.log('Restore progress:', data.message); + console.log('Status:', data.status); // Shows current state }); ``` diff --git a/robosystems_client/api/graph_limits/get_graph_limits.py b/robosystems_client/api/graph_limits/get_graph_limits.py index 8628679..02f30d9 100644 --- a/robosystems_client/api/graph_limits/get_graph_limits.py +++ b/robosystems_client/api/graph_limits/get_graph_limits.py @@ -5,9 +5,7 @@ from ... 
import errors from ...client import AuthenticatedClient, Client -from ...models.get_graph_limits_response_getgraphlimits import ( - GetGraphLimitsResponseGetgraphlimits, -) +from ...models.graph_limits_response import GraphLimitsResponse from ...models.http_validation_error import HTTPValidationError from ...types import Response @@ -25,9 +23,9 @@ def _get_kwargs( def _parse_response( *, client: Union[AuthenticatedClient, Client], response: httpx.Response -) -> Optional[Union[Any, GetGraphLimitsResponseGetgraphlimits, HTTPValidationError]]: +) -> Optional[Union[Any, GraphLimitsResponse, HTTPValidationError]]: if response.status_code == 200: - response_200 = GetGraphLimitsResponseGetgraphlimits.from_dict(response.json()) + response_200 = GraphLimitsResponse.from_dict(response.json()) return response_200 @@ -56,7 +54,7 @@ def _parse_response( def _build_response( *, client: Union[AuthenticatedClient, Client], response: httpx.Response -) -> Response[Union[Any, GetGraphLimitsResponseGetgraphlimits, HTTPValidationError]]: +) -> Response[Union[Any, GraphLimitsResponse, HTTPValidationError]]: return Response( status_code=HTTPStatus(response.status_code), content=response.content, @@ -69,7 +67,7 @@ def sync_detailed( graph_id: str, *, client: AuthenticatedClient, -) -> Response[Union[Any, GetGraphLimitsResponseGetgraphlimits, HTTPValidationError]]: +) -> Response[Union[Any, GraphLimitsResponse, HTTPValidationError]]: """Get Graph Operational Limits Get comprehensive operational limits for the graph database. @@ -94,7 +92,7 @@ def sync_detailed( httpx.TimeoutException: If the request takes longer than Client.timeout. Returns: - Response[Union[Any, GetGraphLimitsResponseGetgraphlimits, HTTPValidationError]] + Response[Union[Any, GraphLimitsResponse, HTTPValidationError]] """ kwargs = _get_kwargs( @@ -112,7 +110,7 @@ def sync( graph_id: str, *, client: AuthenticatedClient, -) -> Optional[Union[Any, GetGraphLimitsResponseGetgraphlimits, HTTPValidationError]]: +) -> Optional[Union[Any, GraphLimitsResponse, HTTPValidationError]]: """Get Graph Operational Limits Get comprehensive operational limits for the graph database. @@ -137,7 +135,7 @@ def sync( httpx.TimeoutException: If the request takes longer than Client.timeout. Returns: - Union[Any, GetGraphLimitsResponseGetgraphlimits, HTTPValidationError] + Union[Any, GraphLimitsResponse, HTTPValidationError] """ return sync_detailed( @@ -150,7 +148,7 @@ async def asyncio_detailed( graph_id: str, *, client: AuthenticatedClient, -) -> Response[Union[Any, GetGraphLimitsResponseGetgraphlimits, HTTPValidationError]]: +) -> Response[Union[Any, GraphLimitsResponse, HTTPValidationError]]: """Get Graph Operational Limits Get comprehensive operational limits for the graph database. @@ -175,7 +173,7 @@ async def asyncio_detailed( httpx.TimeoutException: If the request takes longer than Client.timeout. Returns: - Response[Union[Any, GetGraphLimitsResponseGetgraphlimits, HTTPValidationError]] + Response[Union[Any, GraphLimitsResponse, HTTPValidationError]] """ kwargs = _get_kwargs( @@ -191,7 +189,7 @@ async def asyncio( graph_id: str, *, client: AuthenticatedClient, -) -> Optional[Union[Any, GetGraphLimitsResponseGetgraphlimits, HTTPValidationError]]: +) -> Optional[Union[Any, GraphLimitsResponse, HTTPValidationError]]: """Get Graph Operational Limits Get comprehensive operational limits for the graph database. @@ -216,7 +214,7 @@ async def asyncio( httpx.TimeoutException: If the request takes longer than Client.timeout. 
Returns: - Union[Any, GetGraphLimitsResponseGetgraphlimits, HTTPValidationError] + Union[Any, GraphLimitsResponse, HTTPValidationError] """ return ( diff --git a/robosystems_client/api/graphs/create_graph.py b/robosystems_client/api/graphs/create_graph.py index 1bac42e..dbdb8d8 100644 --- a/robosystems_client/api/graphs/create_graph.py +++ b/robosystems_client/api/graphs/create_graph.py @@ -70,9 +70,35 @@ def sync_detailed( This endpoint starts an asynchronous graph creation operation and returns connection details for monitoring progress via Server-Sent Events (SSE). - **Operation Types:** - - **Generic Graph**: Creates empty graph with schema extensions - - **Entity Graph**: Creates graph with initial entity data + **Graph Creation Options:** + + 1. **Entity Graph with Initial Entity** (`initial_entity` provided, `create_entity=True`): + - Creates graph structure with entity schema extensions + - Populates an initial entity node with provided data + - Useful when you want a pre-configured entity to start with + - Example: Creating a company graph with the company already populated + + 2. **Entity Graph without Initial Entity** (`initial_entity=None`, `create_entity=False`): + - Creates graph structure with entity schema extensions + - Graph starts empty, ready for data import + - Useful for bulk data imports or custom workflows + - Example: Creating a graph structure before importing from CSV/API + + 3. **Generic Graph** (no `initial_entity` provided): + - Creates empty graph with custom schema extensions + - General-purpose knowledge graph + - Example: Analytics graphs, custom data models + + **Required Fields:** + - `metadata.graph_name`: Unique name for the graph + - `instance_tier`: Resource tier (kuzu-standard, kuzu-large, kuzu-xlarge) + + **Optional Fields:** + - `metadata.description`: Human-readable description of the graph's purpose + - `metadata.schema_extensions`: List of schema extensions (roboledger, roboinvestor, etc.) + - `tags`: Organizational tags (max 10) + - `initial_entity`: Entity data (required for entity graphs with initial data) + - `create_entity`: Whether to populate initial entity (default: true when initial_entity provided) **Monitoring Progress:** Use the returned `operation_id` to connect to the SSE stream: @@ -107,12 +133,11 @@ def sync_detailed( - `_links.status`: Point-in-time status check endpoint Args: - body (CreateGraphRequest): Request model for creating a new graph. Example: - {'initial_entity': {'cik': '0001234567', 'name': 'Acme Consulting LLC', 'uri': - 'https://acmeconsulting.com'}, 'instance_tier': 'kuzu-standard', 'metadata': - {'description': 'Professional consulting services with full accounting integration', - 'graph_name': 'Acme Consulting LLC', 'schema_extensions': ['roboledger']}, 'tags': - ['consulting', 'professional-services']}. + body (CreateGraphRequest): Request model for creating a new graph. + + Use this to create either: + - **Entity graphs**: Standard graphs with entity schema and optional extensions + - **Custom graphs**: Generic graphs with fully custom schema definitions Raises: errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. @@ -145,9 +170,35 @@ def sync( This endpoint starts an asynchronous graph creation operation and returns connection details for monitoring progress via Server-Sent Events (SSE). 
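To make these request shapes concrete, here is a hedged sketch of option 3 (a generic graph) from the Graph Creation Options listed above. It relies on the `from_dict` constructor that openapi-python-client generates for every model; the field names come from the Required/Optional Fields lists, and every value is illustrative.

```python
from robosystems_client.models import CreateGraphRequest

# Option 3: generic graph -- no initial_entity, so no entity node is populated.
request = CreateGraphRequest.from_dict(
    {
        "metadata": {
            "graph_name": "analytics-sandbox",  # required, unique name
            "description": "Ad-hoc analytics graph",  # optional
        },
        "instance_tier": "kuzu-standard",  # kuzu-standard | kuzu-large | kuzu-xlarge
        "tags": ["analytics"],  # optional, max 10
    }
)
```

Passing this body to `create_graph.sync_detailed(client=client, body=request)` should return the `operation_id` consumed by the monitoring snippets.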
- **Operation Types:** - - **Generic Graph**: Creates empty graph with schema extensions - - **Entity Graph**: Creates graph with initial entity data + **Graph Creation Options:** + + 1. **Entity Graph with Initial Entity** (`initial_entity` provided, `create_entity=True`): + - Creates graph structure with entity schema extensions + - Populates an initial entity node with provided data + - Useful when you want a pre-configured entity to start with + - Example: Creating a company graph with the company already populated + + 2. **Entity Graph without Initial Entity** (`initial_entity=None`, `create_entity=False`): + - Creates graph structure with entity schema extensions + - Graph starts empty, ready for data import + - Useful for bulk data imports or custom workflows + - Example: Creating a graph structure before importing from CSV/API + + 3. **Generic Graph** (no `initial_entity` provided): + - Creates empty graph with custom schema extensions + - General-purpose knowledge graph + - Example: Analytics graphs, custom data models + + **Required Fields:** + - `metadata.graph_name`: Unique name for the graph + - `instance_tier`: Resource tier (kuzu-standard, kuzu-large, kuzu-xlarge) + + **Optional Fields:** + - `metadata.description`: Human-readable description of the graph's purpose + - `metadata.schema_extensions`: List of schema extensions (roboledger, roboinvestor, etc.) + - `tags`: Organizational tags (max 10) + - `initial_entity`: Entity data (required for entity graphs with initial data) + - `create_entity`: Whether to populate initial entity (default: true when initial_entity provided) **Monitoring Progress:** Use the returned `operation_id` to connect to the SSE stream: @@ -182,12 +233,11 @@ def sync( - `_links.status`: Point-in-time status check endpoint Args: - body (CreateGraphRequest): Request model for creating a new graph. Example: - {'initial_entity': {'cik': '0001234567', 'name': 'Acme Consulting LLC', 'uri': - 'https://acmeconsulting.com'}, 'instance_tier': 'kuzu-standard', 'metadata': - {'description': 'Professional consulting services with full accounting integration', - 'graph_name': 'Acme Consulting LLC', 'schema_extensions': ['roboledger']}, 'tags': - ['consulting', 'professional-services']}. + body (CreateGraphRequest): Request model for creating a new graph. + + Use this to create either: + - **Entity graphs**: Standard graphs with entity schema and optional extensions + - **Custom graphs**: Generic graphs with fully custom schema definitions Raises: errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. @@ -215,9 +265,35 @@ async def asyncio_detailed( This endpoint starts an asynchronous graph creation operation and returns connection details for monitoring progress via Server-Sent Events (SSE). - **Operation Types:** - - **Generic Graph**: Creates empty graph with schema extensions - - **Entity Graph**: Creates graph with initial entity data + **Graph Creation Options:** + + 1. **Entity Graph with Initial Entity** (`initial_entity` provided, `create_entity=True`): + - Creates graph structure with entity schema extensions + - Populates an initial entity node with provided data + - Useful when you want a pre-configured entity to start with + - Example: Creating a company graph with the company already populated + + 2. 
**Entity Graph without Initial Entity** (`initial_entity=None`, `create_entity=False`): + - Creates graph structure with entity schema extensions + - Graph starts empty, ready for data import + - Useful for bulk data imports or custom workflows + - Example: Creating a graph structure before importing from CSV/API + + 3. **Generic Graph** (no `initial_entity` provided): + - Creates empty graph with custom schema extensions + - General-purpose knowledge graph + - Example: Analytics graphs, custom data models + + **Required Fields:** + - `metadata.graph_name`: Unique name for the graph + - `instance_tier`: Resource tier (kuzu-standard, kuzu-large, kuzu-xlarge) + + **Optional Fields:** + - `metadata.description`: Human-readable description of the graph's purpose + - `metadata.schema_extensions`: List of schema extensions (roboledger, roboinvestor, etc.) + - `tags`: Organizational tags (max 10) + - `initial_entity`: Entity data (required for entity graphs with initial data) + - `create_entity`: Whether to populate initial entity (default: true when initial_entity provided) **Monitoring Progress:** Use the returned `operation_id` to connect to the SSE stream: @@ -252,12 +328,11 @@ async def asyncio_detailed( - `_links.status`: Point-in-time status check endpoint Args: - body (CreateGraphRequest): Request model for creating a new graph. Example: - {'initial_entity': {'cik': '0001234567', 'name': 'Acme Consulting LLC', 'uri': - 'https://acmeconsulting.com'}, 'instance_tier': 'kuzu-standard', 'metadata': - {'description': 'Professional consulting services with full accounting integration', - 'graph_name': 'Acme Consulting LLC', 'schema_extensions': ['roboledger']}, 'tags': - ['consulting', 'professional-services']}. + body (CreateGraphRequest): Request model for creating a new graph. + + Use this to create either: + - **Entity graphs**: Standard graphs with entity schema and optional extensions + - **Custom graphs**: Generic graphs with fully custom schema definitions Raises: errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. @@ -288,9 +363,35 @@ async def asyncio( This endpoint starts an asynchronous graph creation operation and returns connection details for monitoring progress via Server-Sent Events (SSE). - **Operation Types:** - - **Generic Graph**: Creates empty graph with schema extensions - - **Entity Graph**: Creates graph with initial entity data + **Graph Creation Options:** + + 1. **Entity Graph with Initial Entity** (`initial_entity` provided, `create_entity=True`): + - Creates graph structure with entity schema extensions + - Populates an initial entity node with provided data + - Useful when you want a pre-configured entity to start with + - Example: Creating a company graph with the company already populated + + 2. **Entity Graph without Initial Entity** (`initial_entity=None`, `create_entity=False`): + - Creates graph structure with entity schema extensions + - Graph starts empty, ready for data import + - Useful for bulk data imports or custom workflows + - Example: Creating a graph structure before importing from CSV/API + + 3. 
**Generic Graph** (no `initial_entity` provided): + - Creates empty graph with custom schema extensions + - General-purpose knowledge graph + - Example: Analytics graphs, custom data models + + **Required Fields:** + - `metadata.graph_name`: Unique name for the graph + - `instance_tier`: Resource tier (kuzu-standard, kuzu-large, kuzu-xlarge) + + **Optional Fields:** + - `metadata.description`: Human-readable description of the graph's purpose + - `metadata.schema_extensions`: List of schema extensions (roboledger, roboinvestor, etc.) + - `tags`: Organizational tags (max 10) + - `initial_entity`: Entity data (required for entity graphs with initial data) + - `create_entity`: Whether to populate initial entity (default: true when initial_entity provided) **Monitoring Progress:** Use the returned `operation_id` to connect to the SSE stream: @@ -325,12 +426,11 @@ async def asyncio( - `_links.status`: Point-in-time status check endpoint Args: - body (CreateGraphRequest): Request model for creating a new graph. Example: - {'initial_entity': {'cik': '0001234567', 'name': 'Acme Consulting LLC', 'uri': - 'https://acmeconsulting.com'}, 'instance_tier': 'kuzu-standard', 'metadata': - {'description': 'Professional consulting services with full accounting integration', - 'graph_name': 'Acme Consulting LLC', 'schema_extensions': ['roboledger']}, 'tags': - ['consulting', 'professional-services']}. + body (CreateGraphRequest): Request model for creating a new graph. + + Use this to create either: + - **Entity graphs**: Standard graphs with entity schema and optional extensions + - **Custom graphs**: Generic graphs with fully custom schema definitions Raises: errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. diff --git a/robosystems_client/api/graphs/get_available_graph_tiers.py b/robosystems_client/api/graphs/get_available_graph_tiers.py new file mode 100644 index 0000000..0dda407 --- /dev/null +++ b/robosystems_client/api/graphs/get_available_graph_tiers.py @@ -0,0 +1,279 @@ +from http import HTTPStatus +from typing import Any, Optional, Union, cast + +import httpx + +from ... 
import errors +from ...client import AuthenticatedClient, Client +from ...models.http_validation_error import HTTPValidationError +from ...types import UNSET, Response, Unset + + +def _get_kwargs( + *, + include_disabled: Union[Unset, bool] = False, +) -> dict[str, Any]: + params: dict[str, Any] = {} + + params["include_disabled"] = include_disabled + + params = {k: v for k, v in params.items() if v is not UNSET and v is not None} + + _kwargs: dict[str, Any] = { + "method": "get", + "url": "/v1/graphs/tiers", + "params": params, + } + + return _kwargs + + +def _parse_response( + *, client: Union[AuthenticatedClient, Client], response: httpx.Response +) -> Optional[Union[Any, HTTPValidationError]]: + if response.status_code == 200: + response_200 = response.json() + return response_200 + + if response.status_code == 422: + response_422 = HTTPValidationError.from_dict(response.json()) + + return response_422 + + if response.status_code == 500: + response_500 = cast(Any, None) + return response_500 + + if client.raise_on_unexpected_status: + raise errors.UnexpectedStatus(response.status_code, response.content) + else: + return None + + +def _build_response( + *, client: Union[AuthenticatedClient, Client], response: httpx.Response +) -> Response[Union[Any, HTTPValidationError]]: + return Response( + status_code=HTTPStatus(response.status_code), + content=response.content, + headers=response.headers, + parsed=_parse_response(client=client, response=response), + ) + + +def sync_detailed( + *, + client: AuthenticatedClient, + include_disabled: Union[Unset, bool] = False, +) -> Response[Union[Any, HTTPValidationError]]: + """Get Available Graph Tiers + + List all available graph database tier configurations. + + This endpoint provides comprehensive technical specifications for each available + graph database tier, including instance types, resource limits, and features. + + **Tier Information:** + Each tier includes: + - Technical specifications (instance type, memory, storage) + - Resource limits (subgraphs, credits, rate limits) + - Feature list with capabilities + - Availability status + + **Available Tiers:** + - **kuzu-standard**: Multi-tenant entry-level tier + - **kuzu-large**: Dedicated professional tier with subgraph support + - **kuzu-xlarge**: Enterprise tier with maximum resources + - **neo4j-community-large**: Neo4j Community Edition (optional, if enabled) + - **neo4j-enterprise-xlarge**: Neo4j Enterprise Edition (optional, if enabled) + + **Use Cases:** + - Display tier options in graph creation UI + - Show technical specifications for tier selection + - Validate tier availability before graph creation + - Display feature comparisons + + **Note:** + Tier listing is included - no credit consumption required. + + Args: + include_disabled (Union[Unset, bool]): Default: False. + + Raises: + errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. + httpx.TimeoutException: If the request takes longer than Client.timeout. + + Returns: + Response[Union[Any, HTTPValidationError]] + """ + + kwargs = _get_kwargs( + include_disabled=include_disabled, + ) + + response = client.get_httpx_client().request( + **kwargs, + ) + + return _build_response(client=client, response=response) + + +def sync( + *, + client: AuthenticatedClient, + include_disabled: Union[Unset, bool] = False, +) -> Optional[Union[Any, HTTPValidationError]]: + """Get Available Graph Tiers + + List all available graph database tier configurations. 
+ + This endpoint provides comprehensive technical specifications for each available + graph database tier, including instance types, resource limits, and features. + + **Tier Information:** + Each tier includes: + - Technical specifications (instance type, memory, storage) + - Resource limits (subgraphs, credits, rate limits) + - Feature list with capabilities + - Availability status + + **Available Tiers:** + - **kuzu-standard**: Multi-tenant entry-level tier + - **kuzu-large**: Dedicated professional tier with subgraph support + - **kuzu-xlarge**: Enterprise tier with maximum resources + - **neo4j-community-large**: Neo4j Community Edition (optional, if enabled) + - **neo4j-enterprise-xlarge**: Neo4j Enterprise Edition (optional, if enabled) + + **Use Cases:** + - Display tier options in graph creation UI + - Show technical specifications for tier selection + - Validate tier availability before graph creation + - Display feature comparisons + + **Note:** + Tier listing is included - no credit consumption required. + + Args: + include_disabled (Union[Unset, bool]): Default: False. + + Raises: + errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. + httpx.TimeoutException: If the request takes longer than Client.timeout. + + Returns: + Union[Any, HTTPValidationError] + """ + + return sync_detailed( + client=client, + include_disabled=include_disabled, + ).parsed + + +async def asyncio_detailed( + *, + client: AuthenticatedClient, + include_disabled: Union[Unset, bool] = False, +) -> Response[Union[Any, HTTPValidationError]]: + """Get Available Graph Tiers + + List all available graph database tier configurations. + + This endpoint provides comprehensive technical specifications for each available + graph database tier, including instance types, resource limits, and features. + + **Tier Information:** + Each tier includes: + - Technical specifications (instance type, memory, storage) + - Resource limits (subgraphs, credits, rate limits) + - Feature list with capabilities + - Availability status + + **Available Tiers:** + - **kuzu-standard**: Multi-tenant entry-level tier + - **kuzu-large**: Dedicated professional tier with subgraph support + - **kuzu-xlarge**: Enterprise tier with maximum resources + - **neo4j-community-large**: Neo4j Community Edition (optional, if enabled) + - **neo4j-enterprise-xlarge**: Neo4j Enterprise Edition (optional, if enabled) + + **Use Cases:** + - Display tier options in graph creation UI + - Show technical specifications for tier selection + - Validate tier availability before graph creation + - Display feature comparisons + + **Note:** + Tier listing is included - no credit consumption required. + + Args: + include_disabled (Union[Unset, bool]): Default: False. + + Raises: + errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. + httpx.TimeoutException: If the request takes longer than Client.timeout. + + Returns: + Response[Union[Any, HTTPValidationError]] + """ + + kwargs = _get_kwargs( + include_disabled=include_disabled, + ) + + response = await client.get_async_httpx_client().request(**kwargs) + + return _build_response(client=client, response=response) + + +async def asyncio( + *, + client: AuthenticatedClient, + include_disabled: Union[Unset, bool] = False, +) -> Optional[Union[Any, HTTPValidationError]]: + """Get Available Graph Tiers + + List all available graph database tier configurations. 
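As a usage sketch for these four wrappers (signatures shown above): the `AuthenticatedClient` constructor below follows the usual openapi-python-client pattern (`base_url` plus `token`), and the placeholder values are illustrative. Note that `_parse_response` returns the 200 body as `response.json()`, so `sync` yields an untyped dict rather than a model.

```python
from robosystems_client.api.graphs import get_available_graph_tiers
from robosystems_client.client import AuthenticatedClient

client = AuthenticatedClient(base_url="http://localhost:8000", token="YOUR_API_TOKEN")

# The default include_disabled=False hides tiers that are not currently enabled.
tiers = get_available_graph_tiers.sync(client=client, include_disabled=False)
if tiers is not None:
    print(tiers)  # plain dict from response.json(); no typed model for this endpoint yet
```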
+ + This endpoint provides comprehensive technical specifications for each available + graph database tier, including instance types, resource limits, and features. + + **Tier Information:** + Each tier includes: + - Technical specifications (instance type, memory, storage) + - Resource limits (subgraphs, credits, rate limits) + - Feature list with capabilities + - Availability status + + **Available Tiers:** + - **kuzu-standard**: Multi-tenant entry-level tier + - **kuzu-large**: Dedicated professional tier with subgraph support + - **kuzu-xlarge**: Enterprise tier with maximum resources + - **neo4j-community-large**: Neo4j Community Edition (optional, if enabled) + - **neo4j-enterprise-xlarge**: Neo4j Enterprise Edition (optional, if enabled) + + **Use Cases:** + - Display tier options in graph creation UI + - Show technical specifications for tier selection + - Validate tier availability before graph creation + - Display feature comparisons + + **Note:** + Tier listing is included - no credit consumption required. + + Args: + include_disabled (Union[Unset, bool]): Default: False. + + Raises: + errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. + httpx.TimeoutException: If the request takes longer than Client.timeout. + + Returns: + Union[Any, HTTPValidationError] + """ + + return ( + await asyncio_detailed( + client=client, + include_disabled=include_disabled, + ) + ).parsed diff --git a/robosystems_client/api/query/execute_cypher_query.py b/robosystems_client/api/query/execute_cypher_query.py index c0d311e..08bb2de 100644 --- a/robosystems_client/api/query/execute_cypher_query.py +++ b/robosystems_client/api/query/execute_cypher_query.py @@ -6,6 +6,7 @@ from ... import errors from ...client import AuthenticatedClient, Client from ...models.cypher_query_request import CypherQueryRequest +from ...models.execute_cypher_query_response_200 import ExecuteCypherQueryResponse200 from ...models.http_validation_error import HTTPValidationError from ...models.response_mode import ResponseMode from ...types import UNSET, Response, Unset @@ -59,7 +60,7 @@ def _get_kwargs( def _parse_response( *, client: Union[AuthenticatedClient, Client], response: httpx.Response -) -> Optional[Union[Any, HTTPValidationError]]: +) -> Optional[Union[Any, ExecuteCypherQueryResponse200, HTTPValidationError]]: if response.status_code == 200: content_type = response.headers.get("content-type", "") if ( @@ -67,7 +68,8 @@ def _parse_response( or response.headers.get("x-stream-format") == "ndjson" ): return None - response_200 = response.json() + response_200 = ExecuteCypherQueryResponse200.from_dict(response.json()) + return response_200 if response.status_code == 202: @@ -111,7 +113,7 @@ def _parse_response( def _build_response( *, client: Union[AuthenticatedClient, Client], response: httpx.Response -) -> Response[Union[Any, HTTPValidationError]]: +) -> Response[Union[Any, ExecuteCypherQueryResponse200, HTTPValidationError]]: return Response( status_code=HTTPStatus(response.status_code), content=response.content, @@ -128,7 +130,7 @@ def sync_detailed( mode: Union[None, ResponseMode, Unset] = UNSET, chunk_size: Union[None, Unset, int] = UNSET, test_mode: Union[Unset, bool] = False, -) -> Response[Union[Any, HTTPValidationError]]: +) -> Response[Union[Any, ExecuteCypherQueryResponse200, HTTPValidationError]]: r"""Execute Cypher Query (Read-Only) Execute a read-only Cypher query with intelligent response optimization. 
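The NDJSON patch applied by `bin/generate-sdk` is what makes the streaming branch in `_parse_response` return `None` for `parsed`, so callers must fall back to the raw body. Below is a hedged sketch of that calling pattern, assuming `CypherQueryRequest.from_dict` accepts a `{"query": ...}` payload; the graph id and query text are illustrative.

```python
import json

from robosystems_client.api.query import execute_cypher_query
from robosystems_client.client import AuthenticatedClient
from robosystems_client.models import CypherQueryRequest

client = AuthenticatedClient(base_url="http://localhost:8000", token="YOUR_API_TOKEN")
body = CypherQueryRequest.from_dict({"query": "MATCH (n) RETURN n LIMIT 10"})  # assumed field name

response = execute_cypher_query.sync_detailed(graph_id="kg1a2b3c", client=client, body=body)
if response.parsed is not None:
    print(response.parsed)  # typed ExecuteCypherQueryResponse200
elif "application/x-ndjson" in response.headers.get("content-type", ""):
    # Streaming response: parsed is None by design; read NDJSON rows from the raw body.
    for line in response.content.splitlines():
        if line.strip():
            print(json.loads(line))
```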
@@ -207,7 +209,7 @@ def sync_detailed( httpx.TimeoutException: If the request takes longer than Client.timeout. Returns: - Response[Union[Any, HTTPValidationError]] + Response[Union[Any, ExecuteCypherQueryResponse200, HTTPValidationError]] """ kwargs = _get_kwargs( @@ -233,7 +235,7 @@ def sync( mode: Union[None, ResponseMode, Unset] = UNSET, chunk_size: Union[None, Unset, int] = UNSET, test_mode: Union[Unset, bool] = False, -) -> Optional[Union[Any, HTTPValidationError]]: +) -> Optional[Union[Any, ExecuteCypherQueryResponse200, HTTPValidationError]]: r"""Execute Cypher Query (Read-Only) Execute a read-only Cypher query with intelligent response optimization. @@ -312,7 +314,7 @@ def sync( httpx.TimeoutException: If the request takes longer than Client.timeout. Returns: - Union[Any, HTTPValidationError] + Union[Any, ExecuteCypherQueryResponse200, HTTPValidationError] """ return sync_detailed( @@ -333,7 +335,7 @@ async def asyncio_detailed( mode: Union[None, ResponseMode, Unset] = UNSET, chunk_size: Union[None, Unset, int] = UNSET, test_mode: Union[Unset, bool] = False, -) -> Response[Union[Any, HTTPValidationError]]: +) -> Response[Union[Any, ExecuteCypherQueryResponse200, HTTPValidationError]]: r"""Execute Cypher Query (Read-Only) Execute a read-only Cypher query with intelligent response optimization. @@ -412,7 +414,7 @@ async def asyncio_detailed( httpx.TimeoutException: If the request takes longer than Client.timeout. Returns: - Response[Union[Any, HTTPValidationError]] + Response[Union[Any, ExecuteCypherQueryResponse200, HTTPValidationError]] """ kwargs = _get_kwargs( @@ -436,7 +438,7 @@ async def asyncio( mode: Union[None, ResponseMode, Unset] = UNSET, chunk_size: Union[None, Unset, int] = UNSET, test_mode: Union[Unset, bool] = False, -) -> Optional[Union[Any, HTTPValidationError]]: +) -> Optional[Union[Any, ExecuteCypherQueryResponse200, HTTPValidationError]]: r"""Execute Cypher Query (Read-Only) Execute a read-only Cypher query with intelligent response optimization. @@ -515,7 +517,7 @@ async def asyncio( httpx.TimeoutException: If the request takes longer than Client.timeout. 
Returns: - Union[Any, HTTPValidationError] + Union[Any, ExecuteCypherQueryResponse200, HTTPValidationError] """ return ( diff --git a/robosystems_client/models/__init__.py b/robosystems_client/models/__init__.py index b2e0021..1638ad2 100644 --- a/robosystems_client/models/__init__.py +++ b/robosystems_client/models/__init__.py @@ -29,6 +29,8 @@ from .available_extension import AvailableExtension from .available_extensions_response import AvailableExtensionsResponse from .backup_create_request import BackupCreateRequest +from .backup_download_url_response import BackupDownloadUrlResponse +from .backup_limits import BackupLimits from .backup_list_response import BackupListResponse from .backup_response import BackupResponse from .backup_restore_request import BackupRestoreRequest @@ -52,6 +54,7 @@ from .connection_response import ConnectionResponse from .connection_response_metadata import ConnectionResponseMetadata from .connection_response_provider import ConnectionResponseProvider +from .copy_operation_limits import CopyOperationLimits from .create_api_key_request import CreateAPIKeyRequest from .create_api_key_response import CreateAPIKeyResponse from .create_connection_request import CreateConnectionRequest @@ -59,6 +62,7 @@ from .create_graph_request import CreateGraphRequest from .create_subgraph_request import CreateSubgraphRequest from .create_subgraph_request_metadata_type_0 import CreateSubgraphRequestMetadataType0 +from .credit_limits import CreditLimits from .credit_summary import CreditSummary from .credit_summary_response import CreditSummaryResponse from .credits_summary_response import CreditsSummaryResponse @@ -91,6 +95,10 @@ from .error_response import ErrorResponse from .exchange_token_request import ExchangeTokenRequest from .exchange_token_request_metadata_type_0 import ExchangeTokenRequestMetadataType0 +from .execute_cypher_query_response_200 import ExecuteCypherQueryResponse200 +from .execute_cypher_query_response_200_data_item import ( + ExecuteCypherQueryResponse200DataItem, +) from .file_info import FileInfo from .file_status_update import FileStatusUpdate from .file_upload_request import FileUploadRequest @@ -105,9 +113,6 @@ from .get_all_shared_repository_limits_response_getallsharedrepositorylimits import ( GetAllSharedRepositoryLimitsResponseGetallsharedrepositorylimits, ) -from .get_backup_download_url_response_getbackupdownloadurl import ( - GetBackupDownloadUrlResponseGetbackupdownloadurl, -) from .get_current_auth_user_response_getcurrentauthuser import ( GetCurrentAuthUserResponseGetcurrentauthuser, ) @@ -118,9 +123,6 @@ from .get_graph_billing_history_response_getgraphbillinghistory import ( GetGraphBillingHistoryResponseGetgraphbillinghistory, ) -from .get_graph_limits_response_getgraphlimits import ( - GetGraphLimitsResponseGetgraphlimits, -) from .get_graph_monthly_bill_response_getgraphmonthlybill import ( GetGraphMonthlyBillResponseGetgraphmonthlybill, ) @@ -137,6 +139,7 @@ GetStorageUsageResponseGetstorageusage, ) from .graph_info import GraphInfo +from .graph_limits_response import GraphLimitsResponse from .graph_metadata import GraphMetadata from .graph_metrics_response import GraphMetricsResponse from .graph_metrics_response_estimated_size import GraphMetricsResponseEstimatedSize @@ -183,7 +186,9 @@ from .plaid_connection_config_institution_type_0 import ( PlaidConnectionConfigInstitutionType0, ) +from .query_limits import QueryLimits from .quick_books_connection_config import QuickBooksConnectionConfig +from .rate_limits import RateLimits from 
.register_request import RegisterRequest from .repository_credits_response import RepositoryCreditsResponse from .repository_plan import RepositoryPlan @@ -217,6 +222,7 @@ from .sso_exchange_response import SSOExchangeResponse from .sso_token_response import SSOTokenResponse from .storage_limit_response import StorageLimitResponse +from .storage_limits import StorageLimits from .subgraph_quota_response import SubgraphQuotaResponse from .subgraph_response import SubgraphResponse from .subgraph_response_metadata_type_0 import SubgraphResponseMetadataType0 @@ -295,6 +301,8 @@ "AvailableExtension", "AvailableExtensionsResponse", "BackupCreateRequest", + "BackupDownloadUrlResponse", + "BackupLimits", "BackupListResponse", "BackupResponse", "BackupRestoreRequest", @@ -314,6 +322,7 @@ "ConnectionResponse", "ConnectionResponseMetadata", "ConnectionResponseProvider", + "CopyOperationLimits", "CreateAPIKeyRequest", "CreateAPIKeyResponse", "CreateConnectionRequest", @@ -321,6 +330,7 @@ "CreateGraphRequest", "CreateSubgraphRequest", "CreateSubgraphRequestMetadataType0", + "CreditLimits", "CreditsSummaryResponse", "CreditsSummaryResponseCreditsByAddonType0Item", "CreditSummary", @@ -345,6 +355,8 @@ "ErrorResponse", "ExchangeTokenRequest", "ExchangeTokenRequestMetadataType0", + "ExecuteCypherQueryResponse200", + "ExecuteCypherQueryResponse200DataItem", "FileInfo", "FileStatusUpdate", "FileUploadRequest", @@ -353,18 +365,17 @@ "ForgotPasswordResponseForgotpassword", "GetAllCreditSummariesResponseGetallcreditsummaries", "GetAllSharedRepositoryLimitsResponseGetallsharedrepositorylimits", - "GetBackupDownloadUrlResponseGetbackupdownloadurl", "GetCurrentAuthUserResponseGetcurrentauthuser", "GetCurrentGraphBillResponseGetcurrentgraphbill", "GetFileInfoResponse", "GetGraphBillingHistoryResponseGetgraphbillinghistory", - "GetGraphLimitsResponseGetgraphlimits", "GetGraphMonthlyBillResponseGetgraphmonthlybill", "GetGraphUsageDetailsResponseGetgraphusagedetails", "GetOperationStatusResponseGetoperationstatus", "GetSharedRepositoryLimitsResponseGetsharedrepositorylimits", "GetStorageUsageResponseGetstorageusage", "GraphInfo", + "GraphLimitsResponse", "GraphMetadata", "GraphMetricsResponse", "GraphMetricsResponseEstimatedSize", @@ -403,7 +414,9 @@ "PlaidConnectionConfig", "PlaidConnectionConfigAccountsType0Item", "PlaidConnectionConfigInstitutionType0", + "QueryLimits", "QuickBooksConnectionConfig", + "RateLimits", "RegisterRequest", "RepositoryCreditsResponse", "RepositoryPlan", @@ -429,6 +442,7 @@ "SSOExchangeResponse", "SSOTokenResponse", "StorageLimitResponse", + "StorageLimits", "SubgraphQuotaResponse", "SubgraphResponse", "SubgraphResponseMetadataType0", diff --git a/robosystems_client/models/agent_response.py b/robosystems_client/models/agent_response.py index 71e7b63..8d09c64 100644 --- a/robosystems_client/models/agent_response.py +++ b/robosystems_client/models/agent_response.py @@ -30,7 +30,7 @@ class AgentResponse: mode_used (AgentMode): Agent execution modes. metadata (Union['AgentResponseMetadataType0', None, Unset]): Response metadata including routing info tokens_used (Union['AgentResponseTokensUsedType0', None, Unset]): Token usage statistics - confidence_score (Union[None, Unset, float]): Confidence score of the response + confidence_score (Union[None, Unset, float]): Confidence score of the response (0.0-1.0 scale) operation_id (Union[None, Unset, str]): Operation ID for SSE monitoring is_partial (Union[Unset, bool]): Whether this is a partial response Default: False. 
error_details (Union['AgentResponseErrorDetailsType0', None, Unset]): Error details if any diff --git a/robosystems_client/models/backup_download_url_response.py b/robosystems_client/models/backup_download_url_response.py new file mode 100644 index 0000000..f9c6e41 --- /dev/null +++ b/robosystems_client/models/backup_download_url_response.py @@ -0,0 +1,92 @@ +from collections.abc import Mapping +from typing import Any, TypeVar + +from attrs import define as _attrs_define +from attrs import field as _attrs_field + +T = TypeVar("T", bound="BackupDownloadUrlResponse") + + +@_attrs_define +class BackupDownloadUrlResponse: + """Response model for backup download URL generation. + + Attributes: + download_url (str): Pre-signed S3 URL for downloading the backup file + expires_in (int): URL expiration time in seconds from now + expires_at (float): Unix timestamp when the URL expires + backup_id (str): Backup identifier + graph_id (str): Graph database identifier + """ + + download_url: str + expires_in: int + expires_at: float + backup_id: str + graph_id: str + additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict) + + def to_dict(self) -> dict[str, Any]: + download_url = self.download_url + + expires_in = self.expires_in + + expires_at = self.expires_at + + backup_id = self.backup_id + + graph_id = self.graph_id + + field_dict: dict[str, Any] = {} + field_dict.update(self.additional_properties) + field_dict.update( + { + "download_url": download_url, + "expires_in": expires_in, + "expires_at": expires_at, + "backup_id": backup_id, + "graph_id": graph_id, + } + ) + + return field_dict + + @classmethod + def from_dict(cls: type[T], src_dict: Mapping[str, Any]) -> T: + d = dict(src_dict) + download_url = d.pop("download_url") + + expires_in = d.pop("expires_in") + + expires_at = d.pop("expires_at") + + backup_id = d.pop("backup_id") + + graph_id = d.pop("graph_id") + + backup_download_url_response = cls( + download_url=download_url, + expires_in=expires_in, + expires_at=expires_at, + backup_id=backup_id, + graph_id=graph_id, + ) + + backup_download_url_response.additional_properties = d + return backup_download_url_response + + @property + def additional_keys(self) -> list[str]: + return list(self.additional_properties.keys()) + + def __getitem__(self, key: str) -> Any: + return self.additional_properties[key] + + def __setitem__(self, key: str, value: Any) -> None: + self.additional_properties[key] = value + + def __delitem__(self, key: str) -> None: + del self.additional_properties[key] + + def __contains__(self, key: str) -> bool: + return key in self.additional_properties diff --git a/robosystems_client/models/backup_limits.py b/robosystems_client/models/backup_limits.py new file mode 100644 index 0000000..b5cc070 --- /dev/null +++ b/robosystems_client/models/backup_limits.py @@ -0,0 +1,76 @@ +from collections.abc import Mapping +from typing import Any, TypeVar + +from attrs import define as _attrs_define +from attrs import field as _attrs_field + +T = TypeVar("T", bound="BackupLimits") + + +@_attrs_define +class BackupLimits: + """Backup operation limits. 
+ + Attributes: + max_backup_size_gb (float): Maximum backup size in GB + backup_retention_days (int): Backup retention period in days + max_backups_per_day (int): Maximum backups per day + """ + + max_backup_size_gb: float + backup_retention_days: int + max_backups_per_day: int + additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict) + + def to_dict(self) -> dict[str, Any]: + max_backup_size_gb = self.max_backup_size_gb + + backup_retention_days = self.backup_retention_days + + max_backups_per_day = self.max_backups_per_day + + field_dict: dict[str, Any] = {} + field_dict.update(self.additional_properties) + field_dict.update( + { + "max_backup_size_gb": max_backup_size_gb, + "backup_retention_days": backup_retention_days, + "max_backups_per_day": max_backups_per_day, + } + ) + + return field_dict + + @classmethod + def from_dict(cls: type[T], src_dict: Mapping[str, Any]) -> T: + d = dict(src_dict) + max_backup_size_gb = d.pop("max_backup_size_gb") + + backup_retention_days = d.pop("backup_retention_days") + + max_backups_per_day = d.pop("max_backups_per_day") + + backup_limits = cls( + max_backup_size_gb=max_backup_size_gb, + backup_retention_days=backup_retention_days, + max_backups_per_day=max_backups_per_day, + ) + + backup_limits.additional_properties = d + return backup_limits + + @property + def additional_keys(self) -> list[str]: + return list(self.additional_properties.keys()) + + def __getitem__(self, key: str) -> Any: + return self.additional_properties[key] + + def __setitem__(self, key: str, value: Any) -> None: + self.additional_properties[key] = value + + def __delitem__(self, key: str) -> None: + del self.additional_properties[key] + + def __contains__(self, key: str) -> bool: + return key in self.additional_properties diff --git a/robosystems_client/models/batch_agent_request.py b/robosystems_client/models/batch_agent_request.py index a888b3d..616b0c0 100644 --- a/robosystems_client/models/batch_agent_request.py +++ b/robosystems_client/models/batch_agent_request.py @@ -18,7 +18,7 @@ class BatchAgentRequest: """Request for batch processing multiple queries. Attributes: - queries (list['AgentRequest']): List of queries to process + queries (list['AgentRequest']): List of queries to process (max 10) parallel (Union[Unset, bool]): Process queries in parallel Default: False. """ diff --git a/robosystems_client/models/batch_agent_response.py b/robosystems_client/models/batch_agent_response.py index 026853d..3c94e47 100644 --- a/robosystems_client/models/batch_agent_response.py +++ b/robosystems_client/models/batch_agent_response.py @@ -16,8 +16,8 @@ class BatchAgentResponse: """Response for batch processing. 
Attributes: - results (list['AgentResponse']): List of agent responses - total_execution_time (float): Total execution time + results (list['AgentResponse']): List of agent responses (includes successes and failures) + total_execution_time (float): Total execution time in seconds parallel_processed (bool): Whether queries were processed in parallel """ diff --git a/robosystems_client/models/copy_operation_limits.py b/robosystems_client/models/copy_operation_limits.py new file mode 100644 index 0000000..ff27f21 --- /dev/null +++ b/robosystems_client/models/copy_operation_limits.py @@ -0,0 +1,100 @@ +from collections.abc import Mapping +from typing import Any, TypeVar, cast + +from attrs import define as _attrs_define +from attrs import field as _attrs_field + +T = TypeVar("T", bound="CopyOperationLimits") + + +@_attrs_define +class CopyOperationLimits: + """Copy/ingestion operation limits. + + Attributes: + max_file_size_gb (float): Maximum file size in GB + timeout_seconds (int): Operation timeout in seconds + concurrent_operations (int): Maximum concurrent operations + max_files_per_operation (int): Maximum files per operation + daily_copy_operations (int): Daily operation limit + supported_formats (list[str]): Supported file formats + """ + + max_file_size_gb: float + timeout_seconds: int + concurrent_operations: int + max_files_per_operation: int + daily_copy_operations: int + supported_formats: list[str] + additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict) + + def to_dict(self) -> dict[str, Any]: + max_file_size_gb = self.max_file_size_gb + + timeout_seconds = self.timeout_seconds + + concurrent_operations = self.concurrent_operations + + max_files_per_operation = self.max_files_per_operation + + daily_copy_operations = self.daily_copy_operations + + supported_formats = self.supported_formats + + field_dict: dict[str, Any] = {} + field_dict.update(self.additional_properties) + field_dict.update( + { + "max_file_size_gb": max_file_size_gb, + "timeout_seconds": timeout_seconds, + "concurrent_operations": concurrent_operations, + "max_files_per_operation": max_files_per_operation, + "daily_copy_operations": daily_copy_operations, + "supported_formats": supported_formats, + } + ) + + return field_dict + + @classmethod + def from_dict(cls: type[T], src_dict: Mapping[str, Any]) -> T: + d = dict(src_dict) + max_file_size_gb = d.pop("max_file_size_gb") + + timeout_seconds = d.pop("timeout_seconds") + + concurrent_operations = d.pop("concurrent_operations") + + max_files_per_operation = d.pop("max_files_per_operation") + + daily_copy_operations = d.pop("daily_copy_operations") + + supported_formats = cast(list[str], d.pop("supported_formats")) + + copy_operation_limits = cls( + max_file_size_gb=max_file_size_gb, + timeout_seconds=timeout_seconds, + concurrent_operations=concurrent_operations, + max_files_per_operation=max_files_per_operation, + daily_copy_operations=daily_copy_operations, + supported_formats=supported_formats, + ) + + copy_operation_limits.additional_properties = d + return copy_operation_limits + + @property + def additional_keys(self) -> list[str]: + return list(self.additional_properties.keys()) + + def __getitem__(self, key: str) -> Any: + return self.additional_properties[key] + + def __setitem__(self, key: str, value: Any) -> None: + self.additional_properties[key] = value + + def __delitem__(self, key: str) -> None: + del self.additional_properties[key] + + def __contains__(self, key: str) -> bool: + return key in self.additional_properties 
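
A note on consuming the NDJSON patch to execute_cypher_query.py above: when the server streams results, the patched `_parse_response` deliberately returns `None`, leaving the caller to read the raw `httpx.Response` stream. A minimal sketch of that client-side handling, assuming direct access to the response object (the `iter_ndjson_rows` helper is hypothetical, not part of the generated client):

```python
import json
from collections.abc import Iterator
from typing import Any

import httpx


def iter_ndjson_rows(response: httpx.Response) -> Iterator[dict[str, Any]]:
  """Decode an NDJSON query stream one row at a time."""
  content_type = response.headers.get("content-type", "")
  # Same detection logic the patch adds to _parse_response.
  if not (
    "application/x-ndjson" in content_type
    or response.headers.get("x-stream-format") == "ndjson"
  ):
    raise ValueError("Response is not an NDJSON stream")
  for line in response.iter_lines():
    if line:  # skip keep-alive blank lines
      yield json.loads(line)
```
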
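Similarly, the new typed `BackupDownloadUrlResponse` (replacing the untyped `GetBackupDownloadUrlResponseGetbackupdownloadurl`) makes the pre-signed URL fields addressable by name. A usage sketch with invented values - the payload here is illustrative, not a real API response:

```python
import time

from robosystems_client.models import BackupDownloadUrlResponse

# Illustrative payload; a real one comes from the backup download endpoint.
payload = {
  "download_url": "https://example-bucket.s3.amazonaws.com/backups/bk_123",
  "expires_in": 3600,
  "expires_at": time.time() + 3600,
  "backup_id": "bk_123",
  "graph_id": "kg_456",
}

url_info = BackupDownloadUrlResponse.from_dict(payload)

# Pre-signed URLs are time-limited, so check expiry before downloading.
if time.time() < url_info.expires_at:
  print(f"Backup {url_info.backup_id} downloadable for ~{url_info.expires_in}s")
else:
  print("URL expired - request a fresh one")
```
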
diff --git a/robosystems_client/models/create_graph_request.py b/robosystems_client/models/create_graph_request.py index 8530ca6..55595cb 100644 --- a/robosystems_client/models/create_graph_request.py +++ b/robosystems_client/models/create_graph_request.py @@ -19,23 +19,22 @@ class CreateGraphRequest: """Request model for creating a new graph. - Example: - {'initial_entity': {'cik': '0001234567', 'name': 'Acme Consulting LLC', 'uri': 'https://acmeconsulting.com'}, - 'instance_tier': 'kuzu-standard', 'metadata': {'description': 'Professional consulting services with full - accounting integration', 'graph_name': 'Acme Consulting LLC', 'schema_extensions': ['roboledger']}, 'tags': - ['consulting', 'professional-services']} - - Attributes: - metadata (GraphMetadata): Metadata for graph creation. - instance_tier (Union[Unset, str]): Instance tier: kuzu-standard, kuzu-large, kuzu-xlarge, neo4j-community-large, - neo4j-enterprise-xlarge Default: 'kuzu-standard'. - custom_schema (Union['CustomSchemaDefinition', None, Unset]): Custom schema definition to apply - initial_entity (Union['InitialEntityData', None, Unset]): Optional initial entity to create in the graph. If - provided, creates a entity-focused graph. - create_entity (Union[Unset, bool]): Whether to create the entity node and upload initial data. Only applies when - initial_entity is provided. Set to False to create graph without populating entity data (useful for file-based - ingestion workflows). Default: True. - tags (Union[Unset, list[str]]): Optional tags for organization + Use this to create either: + - **Entity graphs**: Standard graphs with entity schema and optional extensions + - **Custom graphs**: Generic graphs with fully custom schema definitions + + Attributes: + metadata (GraphMetadata): Metadata for graph creation. + instance_tier (Union[Unset, str]): Instance tier: kuzu-standard, kuzu-large, kuzu-xlarge, neo4j-community-large, + neo4j-enterprise-xlarge Default: 'kuzu-standard'. + custom_schema (Union['CustomSchemaDefinition', None, Unset]): Custom schema definition to apply. If provided, + creates a generic custom graph. If omitted, creates an entity graph using schema_extensions. + initial_entity (Union['InitialEntityData', None, Unset]): Optional initial entity to create in the graph. If + provided with an entity graph, populates the first entity node. + create_entity (Union[Unset, bool]): Whether to create the entity node and upload initial data. Only applies when + initial_entity is provided. Set to False to create a graph without populating entity data (useful for file-based + ingestion workflows). Default: True. + tags (Union[Unset, list[str]]): Optional tags for organization """ metadata: "GraphMetadata" diff --git a/robosystems_client/models/credit_limits.py b/robosystems_client/models/credit_limits.py new file mode 100644 index 0000000..ab7bc18 --- /dev/null +++ b/robosystems_client/models/credit_limits.py @@ -0,0 +1,84 @@ +from collections.abc import Mapping +from typing import Any, TypeVar + +from attrs import define as _attrs_define +from attrs import field as _attrs_field + +T = TypeVar("T", bound="CreditLimits") + + +@_attrs_define +class CreditLimits: + """AI credit limits (optional).
+ + Attributes: + monthly_ai_credits (int): Monthly AI credits allocation + current_balance (int): Current credit balance + storage_billing_enabled (bool): Whether storage billing is enabled + storage_rate_per_gb_per_day (int): Storage billing rate per GB per day + """ + + monthly_ai_credits: int + current_balance: int + storage_billing_enabled: bool + storage_rate_per_gb_per_day: int + additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict) + + def to_dict(self) -> dict[str, Any]: + monthly_ai_credits = self.monthly_ai_credits + + current_balance = self.current_balance + + storage_billing_enabled = self.storage_billing_enabled + + storage_rate_per_gb_per_day = self.storage_rate_per_gb_per_day + + field_dict: dict[str, Any] = {} + field_dict.update(self.additional_properties) + field_dict.update( + { + "monthly_ai_credits": monthly_ai_credits, + "current_balance": current_balance, + "storage_billing_enabled": storage_billing_enabled, + "storage_rate_per_gb_per_day": storage_rate_per_gb_per_day, + } + ) + + return field_dict + + @classmethod + def from_dict(cls: type[T], src_dict: Mapping[str, Any]) -> T: + d = dict(src_dict) + monthly_ai_credits = d.pop("monthly_ai_credits") + + current_balance = d.pop("current_balance") + + storage_billing_enabled = d.pop("storage_billing_enabled") + + storage_rate_per_gb_per_day = d.pop("storage_rate_per_gb_per_day") + + credit_limits = cls( + monthly_ai_credits=monthly_ai_credits, + current_balance=current_balance, + storage_billing_enabled=storage_billing_enabled, + storage_rate_per_gb_per_day=storage_rate_per_gb_per_day, + ) + + credit_limits.additional_properties = d + return credit_limits + + @property + def additional_keys(self) -> list[str]: + return list(self.additional_properties.keys()) + + def __getitem__(self, key: str) -> Any: + return self.additional_properties[key] + + def __setitem__(self, key: str, value: Any) -> None: + self.additional_properties[key] = value + + def __delitem__(self, key: str) -> None: + del self.additional_properties[key] + + def __contains__(self, key: str) -> bool: + return key in self.additional_properties diff --git a/robosystems_client/models/custom_schema_definition.py b/robosystems_client/models/custom_schema_definition.py index d79dadb..0d3fb07 100644 --- a/robosystems_client/models/custom_schema_definition.py +++ b/robosystems_client/models/custom_schema_definition.py @@ -21,16 +21,20 @@ @_attrs_define class CustomSchemaDefinition: - """Custom schema definition for custom graphs. - - Attributes: - name (str): Schema name - version (Union[Unset, str]): Schema version Default: '1.0.0'. - description (Union[None, Unset, str]): Schema description - extends (Union[None, Unset, str]): Base schema to extend (e.g., 'base') - nodes (Union[Unset, list['CustomSchemaDefinitionNodesItem']]): List of node definitions with properties - relationships (Union[Unset, list['CustomSchemaDefinitionRelationshipsItem']]): List of relationship definitions - metadata (Union[Unset, CustomSchemaDefinitionMetadata]): Additional schema metadata + """Custom schema definition for generic graphs. + + This model allows you to define custom node types, relationship types, and properties + for graphs that don't fit the standard entity-based schema. Perfect for domain-specific + applications like inventory systems, org charts, project management, etc. + + Attributes: + name (str): Schema name + version (Union[Unset, str]): Schema version Default: '1.0.0'. 
+ description (Union[None, Unset, str]): Schema description + extends (Union[None, Unset, str]): Base schema to extend (e.g., 'base' for common utilities) + nodes (Union[Unset, list['CustomSchemaDefinitionNodesItem']]): List of node definitions with properties + relationships (Union[Unset, list['CustomSchemaDefinitionRelationshipsItem']]): List of relationship definitions + metadata (Union[Unset, CustomSchemaDefinitionMetadata]): Additional schema metadata """ name: str diff --git a/robosystems_client/models/execute_cypher_query_response_200.py b/robosystems_client/models/execute_cypher_query_response_200.py new file mode 100644 index 0000000..045068b --- /dev/null +++ b/robosystems_client/models/execute_cypher_query_response_200.py @@ -0,0 +1,135 @@ +from collections.abc import Mapping +from typing import TYPE_CHECKING, Any, TypeVar, Union, cast + +from attrs import define as _attrs_define +from attrs import field as _attrs_field + +from ..types import UNSET, Unset + +if TYPE_CHECKING: + from ..models.execute_cypher_query_response_200_data_item import ( + ExecuteCypherQueryResponse200DataItem, + ) + + +T = TypeVar("T", bound="ExecuteCypherQueryResponse200") + + +@_attrs_define +class ExecuteCypherQueryResponse200: + """ + Attributes: + success (Union[Unset, bool]): + data (Union[Unset, list['ExecuteCypherQueryResponse200DataItem']]): + columns (Union[Unset, list[str]]): + row_count (Union[Unset, int]): + execution_time_ms (Union[Unset, float]): + graph_id (Union[Unset, str]): + timestamp (Union[Unset, str]): + """ + + success: Union[Unset, bool] = UNSET + data: Union[Unset, list["ExecuteCypherQueryResponse200DataItem"]] = UNSET + columns: Union[Unset, list[str]] = UNSET + row_count: Union[Unset, int] = UNSET + execution_time_ms: Union[Unset, float] = UNSET + graph_id: Union[Unset, str] = UNSET + timestamp: Union[Unset, str] = UNSET + additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict) + + def to_dict(self) -> dict[str, Any]: + success = self.success + + data: Union[Unset, list[dict[str, Any]]] = UNSET + if not isinstance(self.data, Unset): + data = [] + for data_item_data in self.data: + data_item = data_item_data.to_dict() + data.append(data_item) + + columns: Union[Unset, list[str]] = UNSET + if not isinstance(self.columns, Unset): + columns = self.columns + + row_count = self.row_count + + execution_time_ms = self.execution_time_ms + + graph_id = self.graph_id + + timestamp = self.timestamp + + field_dict: dict[str, Any] = {} + field_dict.update(self.additional_properties) + field_dict.update({}) + if success is not UNSET: + field_dict["success"] = success + if data is not UNSET: + field_dict["data"] = data + if columns is not UNSET: + field_dict["columns"] = columns + if row_count is not UNSET: + field_dict["row_count"] = row_count + if execution_time_ms is not UNSET: + field_dict["execution_time_ms"] = execution_time_ms + if graph_id is not UNSET: + field_dict["graph_id"] = graph_id + if timestamp is not UNSET: + field_dict["timestamp"] = timestamp + + return field_dict + + @classmethod + def from_dict(cls: type[T], src_dict: Mapping[str, Any]) -> T: + from ..models.execute_cypher_query_response_200_data_item import ( + ExecuteCypherQueryResponse200DataItem, + ) + + d = dict(src_dict) + success = d.pop("success", UNSET) + + data = [] + _data = d.pop("data", UNSET) + for data_item_data in _data or []: + data_item = ExecuteCypherQueryResponse200DataItem.from_dict(data_item_data) + + data.append(data_item) + + columns = cast(list[str], d.pop("columns", UNSET)) 
+ + row_count = d.pop("row_count", UNSET) + + execution_time_ms = d.pop("execution_time_ms", UNSET) + + graph_id = d.pop("graph_id", UNSET) + + timestamp = d.pop("timestamp", UNSET) + + execute_cypher_query_response_200 = cls( + success=success, + data=data, + columns=columns, + row_count=row_count, + execution_time_ms=execution_time_ms, + graph_id=graph_id, + timestamp=timestamp, + ) + + execute_cypher_query_response_200.additional_properties = d + return execute_cypher_query_response_200 + + @property + def additional_keys(self) -> list[str]: + return list(self.additional_properties.keys()) + + def __getitem__(self, key: str) -> Any: + return self.additional_properties[key] + + def __setitem__(self, key: str, value: Any) -> None: + self.additional_properties[key] = value + + def __delitem__(self, key: str) -> None: + del self.additional_properties[key] + + def __contains__(self, key: str) -> bool: + return key in self.additional_properties diff --git a/robosystems_client/models/get_graph_limits_response_getgraphlimits.py b/robosystems_client/models/execute_cypher_query_response_200_data_item.py similarity index 76% rename from robosystems_client/models/get_graph_limits_response_getgraphlimits.py rename to robosystems_client/models/execute_cypher_query_response_200_data_item.py index bd80ebd..6510c66 100644 --- a/robosystems_client/models/get_graph_limits_response_getgraphlimits.py +++ b/robosystems_client/models/execute_cypher_query_response_200_data_item.py @@ -4,11 +4,11 @@ from attrs import define as _attrs_define from attrs import field as _attrs_field -T = TypeVar("T", bound="GetGraphLimitsResponseGetgraphlimits") +T = TypeVar("T", bound="ExecuteCypherQueryResponse200DataItem") @_attrs_define -class GetGraphLimitsResponseGetgraphlimits: +class ExecuteCypherQueryResponse200DataItem: """ """ additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict) @@ -22,10 +22,10 @@ def to_dict(self) -> dict[str, Any]: @classmethod def from_dict(cls: type[T], src_dict: Mapping[str, Any]) -> T: d = dict(src_dict) - get_graph_limits_response_getgraphlimits = cls() + execute_cypher_query_response_200_data_item = cls() - get_graph_limits_response_getgraphlimits.additional_properties = d - return get_graph_limits_response_getgraphlimits + execute_cypher_query_response_200_data_item.additional_properties = d + return execute_cypher_query_response_200_data_item @property def additional_keys(self) -> list[str]: diff --git a/robosystems_client/models/get_backup_download_url_response_getbackupdownloadurl.py b/robosystems_client/models/get_backup_download_url_response_getbackupdownloadurl.py deleted file mode 100644 index 18e169a..0000000 --- a/robosystems_client/models/get_backup_download_url_response_getbackupdownloadurl.py +++ /dev/null @@ -1,44 +0,0 @@ -from collections.abc import Mapping -from typing import Any, TypeVar - -from attrs import define as _attrs_define -from attrs import field as _attrs_field - -T = TypeVar("T", bound="GetBackupDownloadUrlResponseGetbackupdownloadurl") - - -@_attrs_define -class GetBackupDownloadUrlResponseGetbackupdownloadurl: - """ """ - - additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict) - - def to_dict(self) -> dict[str, Any]: - field_dict: dict[str, Any] = {} - field_dict.update(self.additional_properties) - - return field_dict - - @classmethod - def from_dict(cls: type[T], src_dict: Mapping[str, Any]) -> T: - d = dict(src_dict) - get_backup_download_url_response_getbackupdownloadurl = cls() - - 
get_backup_download_url_response_getbackupdownloadurl.additional_properties = d - return get_backup_download_url_response_getbackupdownloadurl - - @property - def additional_keys(self) -> list[str]: - return list(self.additional_properties.keys()) - - def __getitem__(self, key: str) -> Any: - return self.additional_properties[key] - - def __setitem__(self, key: str, value: Any) -> None: - self.additional_properties[key] = value - - def __delitem__(self, key: str) -> None: - del self.additional_properties[key] - - def __contains__(self, key: str) -> bool: - return key in self.additional_properties diff --git a/robosystems_client/models/graph_limits_response.py b/robosystems_client/models/graph_limits_response.py new file mode 100644 index 0000000..626c6a8 --- /dev/null +++ b/robosystems_client/models/graph_limits_response.py @@ -0,0 +1,174 @@ +from collections.abc import Mapping +from typing import TYPE_CHECKING, Any, TypeVar, Union, cast + +from attrs import define as _attrs_define +from attrs import field as _attrs_field + +from ..types import UNSET, Unset + +if TYPE_CHECKING: + from ..models.backup_limits import BackupLimits + from ..models.copy_operation_limits import CopyOperationLimits + from ..models.credit_limits import CreditLimits + from ..models.query_limits import QueryLimits + from ..models.rate_limits import RateLimits + from ..models.storage_limits import StorageLimits + + +T = TypeVar("T", bound="GraphLimitsResponse") + + +@_attrs_define +class GraphLimitsResponse: + """Response model for comprehensive graph operational limits. + + Attributes: + graph_id (str): Graph database identifier + subscription_tier (str): User's subscription tier + graph_tier (str): Graph's database tier + is_shared_repository (bool): Whether this is a shared repository + storage (StorageLimits): Storage limits information. + queries (QueryLimits): Query operation limits. + copy_operations (CopyOperationLimits): Copy/ingestion operation limits. + backups (BackupLimits): Backup operation limits. + rate_limits (RateLimits): API rate limits. 
+ credits_ (Union['CreditLimits', None, Unset]): AI credit limits (if applicable) + """ + + graph_id: str + subscription_tier: str + graph_tier: str + is_shared_repository: bool + storage: "StorageLimits" + queries: "QueryLimits" + copy_operations: "CopyOperationLimits" + backups: "BackupLimits" + rate_limits: "RateLimits" + credits_: Union["CreditLimits", None, Unset] = UNSET + additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict) + + def to_dict(self) -> dict[str, Any]: + from ..models.credit_limits import CreditLimits + + graph_id = self.graph_id + + subscription_tier = self.subscription_tier + + graph_tier = self.graph_tier + + is_shared_repository = self.is_shared_repository + + storage = self.storage.to_dict() + + queries = self.queries.to_dict() + + copy_operations = self.copy_operations.to_dict() + + backups = self.backups.to_dict() + + rate_limits = self.rate_limits.to_dict() + + credits_: Union[None, Unset, dict[str, Any]] + if isinstance(self.credits_, Unset): + credits_ = UNSET + elif isinstance(self.credits_, CreditLimits): + credits_ = self.credits_.to_dict() + else: + credits_ = self.credits_ + + field_dict: dict[str, Any] = {} + field_dict.update(self.additional_properties) + field_dict.update( + { + "graph_id": graph_id, + "subscription_tier": subscription_tier, + "graph_tier": graph_tier, + "is_shared_repository": is_shared_repository, + "storage": storage, + "queries": queries, + "copy_operations": copy_operations, + "backups": backups, + "rate_limits": rate_limits, + } + ) + if credits_ is not UNSET: + field_dict["credits"] = credits_ + + return field_dict + + @classmethod + def from_dict(cls: type[T], src_dict: Mapping[str, Any]) -> T: + from ..models.backup_limits import BackupLimits + from ..models.copy_operation_limits import CopyOperationLimits + from ..models.credit_limits import CreditLimits + from ..models.query_limits import QueryLimits + from ..models.rate_limits import RateLimits + from ..models.storage_limits import StorageLimits + + d = dict(src_dict) + graph_id = d.pop("graph_id") + + subscription_tier = d.pop("subscription_tier") + + graph_tier = d.pop("graph_tier") + + is_shared_repository = d.pop("is_shared_repository") + + storage = StorageLimits.from_dict(d.pop("storage")) + + queries = QueryLimits.from_dict(d.pop("queries")) + + copy_operations = CopyOperationLimits.from_dict(d.pop("copy_operations")) + + backups = BackupLimits.from_dict(d.pop("backups")) + + rate_limits = RateLimits.from_dict(d.pop("rate_limits")) + + def _parse_credits_(data: object) -> Union["CreditLimits", None, Unset]: + if data is None: + return data + if isinstance(data, Unset): + return data + try: + if not isinstance(data, dict): + raise TypeError() + credits_type_0 = CreditLimits.from_dict(data) + + return credits_type_0 + except: # noqa: E722 + pass + return cast(Union["CreditLimits", None, Unset], data) + + credits_ = _parse_credits_(d.pop("credits", UNSET)) + + graph_limits_response = cls( + graph_id=graph_id, + subscription_tier=subscription_tier, + graph_tier=graph_tier, + is_shared_repository=is_shared_repository, + storage=storage, + queries=queries, + copy_operations=copy_operations, + backups=backups, + rate_limits=rate_limits, + credits_=credits_, + ) + + graph_limits_response.additional_properties = d + return graph_limits_response + + @property + def additional_keys(self) -> list[str]: + return list(self.additional_properties.keys()) + + def __getitem__(self, key: str) -> Any: + return self.additional_properties[key] + + def 
__setitem__(self, key: str, value: Any) -> None: + self.additional_properties[key] = value + + def __delitem__(self, key: str) -> None: + del self.additional_properties[key] + + def __contains__(self, key: str) -> bool: + return key in self.additional_properties diff --git a/robosystems_client/models/initial_entity_data.py b/robosystems_client/models/initial_entity_data.py index 676a998..571d11d 100644 --- a/robosystems_client/models/initial_entity_data.py +++ b/robosystems_client/models/initial_entity_data.py @@ -11,18 +11,21 @@ @_attrs_define class InitialEntityData: - """Initial entity data for graph creation. - - Attributes: - name (str): Entity name - uri (str): Entity website or URI - cik (Union[None, Unset, str]): CIK number for SEC filings - sic (Union[None, Unset, str]): SIC code - sic_description (Union[None, Unset, str]): SIC description - category (Union[None, Unset, str]): Business category - state_of_incorporation (Union[None, Unset, str]): State of incorporation - fiscal_year_end (Union[None, Unset, str]): Fiscal year end (MMDD) - ein (Union[None, Unset, str]): Employer Identification Number + """Initial entity data for entity-focused graph creation. + + When creating an entity graph with an initial entity node, this model defines + the entity's identifying information and metadata. + + Attributes: + name (str): Entity name + uri (str): Entity website or URI + cik (Union[None, Unset, str]): CIK number for SEC filings + sic (Union[None, Unset, str]): SIC code + sic_description (Union[None, Unset, str]): SIC description + category (Union[None, Unset, str]): Business category + state_of_incorporation (Union[None, Unset, str]): State of incorporation + fiscal_year_end (Union[None, Unset, str]): Fiscal year end (MMDD) + ein (Union[None, Unset, str]): Employer Identification Number """ name: str diff --git a/robosystems_client/models/query_limits.py b/robosystems_client/models/query_limits.py new file mode 100644 index 0000000..1cbaf71 --- /dev/null +++ b/robosystems_client/models/query_limits.py @@ -0,0 +1,84 @@ +from collections.abc import Mapping +from typing import Any, TypeVar + +from attrs import define as _attrs_define +from attrs import field as _attrs_field + +T = TypeVar("T", bound="QueryLimits") + + +@_attrs_define +class QueryLimits: + """Query operation limits. 
+ + Attributes: + max_timeout_seconds (int): Maximum query timeout in seconds + chunk_size (int): Maximum chunk size for result streaming + max_rows_per_query (int): Maximum rows returned per query + concurrent_queries (int): Maximum concurrent queries allowed + """ + + max_timeout_seconds: int + chunk_size: int + max_rows_per_query: int + concurrent_queries: int + additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict) + + def to_dict(self) -> dict[str, Any]: + max_timeout_seconds = self.max_timeout_seconds + + chunk_size = self.chunk_size + + max_rows_per_query = self.max_rows_per_query + + concurrent_queries = self.concurrent_queries + + field_dict: dict[str, Any] = {} + field_dict.update(self.additional_properties) + field_dict.update( + { + "max_timeout_seconds": max_timeout_seconds, + "chunk_size": chunk_size, + "max_rows_per_query": max_rows_per_query, + "concurrent_queries": concurrent_queries, + } + ) + + return field_dict + + @classmethod + def from_dict(cls: type[T], src_dict: Mapping[str, Any]) -> T: + d = dict(src_dict) + max_timeout_seconds = d.pop("max_timeout_seconds") + + chunk_size = d.pop("chunk_size") + + max_rows_per_query = d.pop("max_rows_per_query") + + concurrent_queries = d.pop("concurrent_queries") + + query_limits = cls( + max_timeout_seconds=max_timeout_seconds, + chunk_size=chunk_size, + max_rows_per_query=max_rows_per_query, + concurrent_queries=concurrent_queries, + ) + + query_limits.additional_properties = d + return query_limits + + @property + def additional_keys(self) -> list[str]: + return list(self.additional_properties.keys()) + + def __getitem__(self, key: str) -> Any: + return self.additional_properties[key] + + def __setitem__(self, key: str, value: Any) -> None: + self.additional_properties[key] = value + + def __delitem__(self, key: str) -> None: + del self.additional_properties[key] + + def __contains__(self, key: str) -> bool: + return key in self.additional_properties diff --git a/robosystems_client/models/rate_limits.py b/robosystems_client/models/rate_limits.py new file mode 100644 index 0000000..643b352 --- /dev/null +++ b/robosystems_client/models/rate_limits.py @@ -0,0 +1,76 @@ +from collections.abc import Mapping +from typing import Any, TypeVar + +from attrs import define as _attrs_define +from attrs import field as _attrs_field + +T = TypeVar("T", bound="RateLimits") + + +@_attrs_define +class RateLimits: + """API rate limits. 
+ + Attributes: + requests_per_minute (int): Requests per minute limit + requests_per_hour (int): Requests per hour limit + burst_capacity (int): Burst capacity for short spikes + """ + + requests_per_minute: int + requests_per_hour: int + burst_capacity: int + additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict) + + def to_dict(self) -> dict[str, Any]: + requests_per_minute = self.requests_per_minute + + requests_per_hour = self.requests_per_hour + + burst_capacity = self.burst_capacity + + field_dict: dict[str, Any] = {} + field_dict.update(self.additional_properties) + field_dict.update( + { + "requests_per_minute": requests_per_minute, + "requests_per_hour": requests_per_hour, + "burst_capacity": burst_capacity, + } + ) + + return field_dict + + @classmethod + def from_dict(cls: type[T], src_dict: Mapping[str, Any]) -> T: + d = dict(src_dict) + requests_per_minute = d.pop("requests_per_minute") + + requests_per_hour = d.pop("requests_per_hour") + + burst_capacity = d.pop("burst_capacity") + + rate_limits = cls( + requests_per_minute=requests_per_minute, + requests_per_hour=requests_per_hour, + burst_capacity=burst_capacity, + ) + + rate_limits.additional_properties = d + return rate_limits + + @property + def additional_keys(self) -> list[str]: + return list(self.additional_properties.keys()) + + def __getitem__(self, key: str) -> Any: + return self.additional_properties[key] + + def __setitem__(self, key: str, value: Any) -> None: + self.additional_properties[key] = value + + def __delitem__(self, key: str) -> None: + del self.additional_properties[key] + + def __contains__(self, key: str) -> bool: + return key in self.additional_properties diff --git a/robosystems_client/models/storage_limits.py b/robosystems_client/models/storage_limits.py new file mode 100644 index 0000000..30c9c41 --- /dev/null +++ b/robosystems_client/models/storage_limits.py @@ -0,0 +1,90 @@ +from collections.abc import Mapping +from typing import Any, TypeVar, Union, cast + +from attrs import define as _attrs_define +from attrs import field as _attrs_field + +from ..types import UNSET, Unset + +T = TypeVar("T", bound="StorageLimits") + + +@_attrs_define +class StorageLimits: + """Storage limits information. 
+ + Attributes: + max_storage_gb (float): Maximum storage limit in GB + approaching_limit (bool): Whether approaching storage limit (>80%) + current_usage_gb (Union[None, Unset, float]): Current storage usage in GB + """ + + max_storage_gb: float + approaching_limit: bool + current_usage_gb: Union[None, Unset, float] = UNSET + additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict) + + def to_dict(self) -> dict[str, Any]: + max_storage_gb = self.max_storage_gb + + approaching_limit = self.approaching_limit + + current_usage_gb: Union[None, Unset, float] + if isinstance(self.current_usage_gb, Unset): + current_usage_gb = UNSET + else: + current_usage_gb = self.current_usage_gb + + field_dict: dict[str, Any] = {} + field_dict.update(self.additional_properties) + field_dict.update( + { + "max_storage_gb": max_storage_gb, + "approaching_limit": approaching_limit, + } + ) + if current_usage_gb is not UNSET: + field_dict["current_usage_gb"] = current_usage_gb + + return field_dict + + @classmethod + def from_dict(cls: type[T], src_dict: Mapping[str, Any]) -> T: + d = dict(src_dict) + max_storage_gb = d.pop("max_storage_gb") + + approaching_limit = d.pop("approaching_limit") + + def _parse_current_usage_gb(data: object) -> Union[None, Unset, float]: + if data is None: + return data + if isinstance(data, Unset): + return data + return cast(Union[None, Unset, float], data) + + current_usage_gb = _parse_current_usage_gb(d.pop("current_usage_gb", UNSET)) + + storage_limits = cls( + max_storage_gb=max_storage_gb, + approaching_limit=approaching_limit, + current_usage_gb=current_usage_gb, + ) + + storage_limits.additional_properties = d + return storage_limits + + @property + def additional_keys(self) -> list[str]: + return list(self.additional_properties.keys()) + + def __getitem__(self, key: str) -> Any: + return self.additional_properties[key] + + def __setitem__(self, key: str, value: Any) -> None: + self.additional_properties[key] = value + + def __delitem__(self, key: str) -> None: + del self.additional_properties[key] + + def __contains__(self, key: str) -> bool: + return key in self.additional_properties
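
Taken together, the new limit models give `GraphLimitsResponse` fully typed nested access where the old `GetGraphLimitsResponseGetgraphlimits` exposed only an untyped dict. A round-trip sketch - every value below is invented for illustration, not a real tier's limits:

```python
from robosystems_client.models import GraphLimitsResponse

# Example /limits payload; all numbers are invented.
payload = {
  "graph_id": "kg_456",
  "subscription_tier": "pro",
  "graph_tier": "kuzu-standard",
  "is_shared_repository": False,
  "storage": {"max_storage_gb": 100.0, "approaching_limit": False, "current_usage_gb": 12.5},
  "queries": {
    "max_timeout_seconds": 60,
    "chunk_size": 1000,
    "max_rows_per_query": 10000,
    "concurrent_queries": 5,
  },
  "copy_operations": {
    "max_file_size_gb": 5.0,
    "timeout_seconds": 3600,
    "concurrent_operations": 2,
    "max_files_per_operation": 50,
    "daily_copy_operations": 100,
    "supported_formats": ["csv", "parquet"],
  },
  "backups": {"max_backup_size_gb": 50.0, "backup_retention_days": 30, "max_backups_per_day": 4},
  "rate_limits": {"requests_per_minute": 120, "requests_per_hour": 5000, "burst_capacity": 20},
}

limits = GraphLimitsResponse.from_dict(payload)

# Nested models are typed: limits.storage is a StorageLimits instance.
if limits.storage.approaching_limit:
  print("Storage is above 80% of the plan limit")
print(f"Up to {limits.queries.concurrent_queries} concurrent queries")
assert limits.to_dict()["rate_limits"]["burst_capacity"] == 20  # round-trips cleanly
```
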
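The same pattern applies on the query path: non-streaming responses now parse into `ExecuteCypherQueryResponse200`, and since the data-item model declares no fields, each row's values land in the item's `additional_properties`. A short sketch with an invented response body:

```python
from robosystems_client.models import ExecuteCypherQueryResponse200

# Shape mirrors the generated model; the rows themselves are invented.
body = {
  "success": True,
  "data": [{"e.name": "Acme"}, {"e.name": "Globex"}],
  "columns": ["e.name"],
  "row_count": 2,
  "execution_time_ms": 4.2,
  "graph_id": "kg_456",
  "timestamp": "2024-01-01T00:00:00Z",
}

result = ExecuteCypherQueryResponse200.from_dict(body)

# Each row is exposed via additional_properties; __getitem__ proxies to it.
for row in result.data or []:
  print(row["e.name"])
```
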
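Finally, the reworked `CreateGraphRequest` docstring distinguishes entity graphs from custom-schema graphs. A hedged sketch of building an entity-graph request - the `metadata` keys follow the example removed from the old docstring and may not be exhaustive, and all values are illustrative:

```python
from robosystems_client.models import CreateGraphRequest

# Illustrative request; metadata keys mirror the docstring's former example.
request = CreateGraphRequest.from_dict(
  {
    "metadata": {
      "graph_name": "Acme Consulting LLC",
      "description": "Professional consulting services",
      "schema_extensions": ["roboledger"],
    },
    "instance_tier": "kuzu-standard",  # the default tier
    # Omitting custom_schema yields an entity graph, per the docstring.
    "initial_entity": {"name": "Acme Consulting LLC", "uri": "https://acmeconsulting.com"},
    "tags": ["consulting", "professional-services"],
  }
)
print(request.instance_tier)
```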