From 4cb3b0f30286619b4332d68c929d74c601199fb6 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 21 Jan 2026 17:37:35 +0100 Subject: [PATCH 1/4] Upgrade Go SDK to 0.99.0 --- .codegen/_openapi_sha | 2 +- .github/workflows/tagging.yml | 18 +- .../internal/schema/annotations_openapi.yml | 88 ++++ .../validation/generated/enum_fields.go | 3 +- .../validation/generated/required_fields.go | 43 +- bundle/schema/jsonschema.json | 106 ++++- .../network-connectivity.go | 2 + cmd/account/settings-v2/settings-v2.go | 223 ++++++++- cmd/workspace/apps/apps.go | 4 +- cmd/workspace/clusters/clusters.go | 4 + .../git-credentials/git-credentials.go | 28 +- .../instance-pools/instance-pools.go | 2 + cmd/workspace/jobs/jobs.go | 9 +- cmd/workspace/postgres/postgres.go | 408 ++++++++++++++--- cmd/workspace/providers/providers.go | 8 +- .../quality-monitor-v2/quality-monitor-v2.go | 25 +- .../quality-monitors/quality-monitors.go | 40 +- cmd/workspace/recipients/recipients.go | 9 +- cmd/workspace/warehouses/warehouses.go | 430 ++++++++++++++++++ .../workspace-settings-v2.go | 2 + go.mod | 2 +- go.sum | 4 +- python/databricks/bundles/jobs/__init__.py | 8 + .../bundles/jobs/_models/cluster_spec.py | 24 + .../bundles/jobs/_models/dashboard_task.py | 5 +- python/databricks/bundles/jobs/_models/job.py | 5 +- .../jobs/_models/node_type_flexibility.py | 40 ++ .../databricks/bundles/pipelines/__init__.py | 16 + .../_models/auto_full_refresh_policy.py | 54 +++ .../bundles/pipelines/_models/day_of_week.py | 2 - .../_models/ingestion_pipeline_definition.py | 14 + .../_models/operation_time_window.py | 69 +++ .../_models/table_specific_config.py | 34 ++ 33 files changed, 1602 insertions(+), 129 deletions(-) create mode 100644 python/databricks/bundles/jobs/_models/node_type_flexibility.py create mode 100644 python/databricks/bundles/pipelines/_models/auto_full_refresh_policy.py create mode 100644 python/databricks/bundles/pipelines/_models/operation_time_window.py diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index a9ea4ce63e..08f378e305 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -dbf9b0a4e0432e846520442b14c34fc7f0ca0d8c \ No newline at end of file +76dbe1cb1a0a017a4484757cb4e542a30a87e9b3 \ No newline at end of file diff --git a/.github/workflows/tagging.yml b/.github/workflows/tagging.yml index 15c8060dd9..6ccb26ad52 100644 --- a/.github/workflows/tagging.yml +++ b/.github/workflows/tagging.yml @@ -2,10 +2,14 @@ name: tagging on: + # Manual dispatch. workflow_dispatch: - # Enable for automatic tagging - #schedule: - # - cron: '0 0 * * TUE' + # No inputs are required for the manual dispatch. + + # Runs at 8:00 UTC on Tuesday, Wednesday, and Thursday. To enable automated + # tagging for a repository, simply add it to the if block of the tag job. + schedule: + - cron: '0 8 * * TUE,WED,THU' # Ensure that only a single instance of the workflow is running at a time. concurrency: @@ -13,6 +17,14 @@ concurrency: jobs: tag: + # Only run the tag job if the trigger is manual (workflow_dispatch) or + # the repository has been approved for automated releases. + # + # To disable release for a repository, simply exclude it from the if + # condition. 
+ if: >- + github.event_name == 'workflow_dispatch' || + github.repository == 'databricks/databricks-sdk-go' environment: "release-is" runs-on: group: databricks-deco-testing-runner-group diff --git a/bundle/internal/schema/annotations_openapi.yml b/bundle/internal/schema/annotations_openapi.yml index d9d506a7b6..d0276f1dee 100644 --- a/bundle/internal/schema/annotations_openapi.yml +++ b/bundle/internal/schema/annotations_openapi.yml @@ -242,6 +242,9 @@ github.com/databricks/cli/bundle/config/resources.Cluster: The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster uses the instance pool with id (instance_pool_id) if the driver pool is not assigned. + "driver_node_type_flexibility": + "description": |- + Flexible node type configuration for the driver node. "driver_node_type_id": "description": |- The node type of the Spark driver. @@ -356,6 +359,9 @@ github.com/databricks/cli/bundle/config/resources.Cluster: This field can only be used when `kind = CLASSIC_PREVIEW`. `effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not. + "worker_node_type_flexibility": + "description": |- + Flexible node type configuration for worker nodes. "workload_type": "description": |- Cluster Attributes showing for clusters workload types. @@ -402,6 +408,8 @@ github.com/databricks/cli/bundle/config/resources.DatabaseInstance: "effective_capacity": "description": |- Deprecated. The sku of the instance; this field will always match the value of capacity. + This is an output only field that contains the value computed from the input field combined with + server side defaults. Use the field without the effective_ prefix to set the value. "deprecation_message": |- This field is deprecated "x-databricks-field-behaviors_output_only": |- @@ -409,38 +417,52 @@ github.com/databricks/cli/bundle/config/resources.DatabaseInstance: "effective_custom_tags": "description": |- The recorded custom tags associated with the instance. + This is an output only field that contains the value computed from the input field combined with + server side defaults. Use the field without the effective_ prefix to set the value. "x-databricks-field-behaviors_output_only": |- true "effective_enable_pg_native_login": "description": |- Whether the instance has PG native password login enabled. + This is an output only field that contains the value computed from the input field combined with + server side defaults. Use the field without the effective_ prefix to set the value. "x-databricks-field-behaviors_output_only": |- true "effective_enable_readable_secondaries": "description": |- Whether secondaries serving read-only traffic are enabled. Defaults to false. + This is an output only field that contains the value computed from the input field combined with + server side defaults. Use the field without the effective_ prefix to set the value. "x-databricks-field-behaviors_output_only": |- true "effective_node_count": "description": |- The number of nodes in the instance, composed of 1 primary and 0 or more secondaries. Defaults to 1 primary and 0 secondaries. + This is an output only field that contains the value computed from the input field combined with + server side defaults. Use the field without the effective_ prefix to set the value. "x-databricks-field-behaviors_output_only": |- true "effective_retention_window_in_days": "description": |- The retention window for the instance. 
This is the time window in days for which the historical data is retained. + This is an output only field that contains the value computed from the input field combined with + server side defaults. Use the field without the effective_ prefix to set the value. "x-databricks-field-behaviors_output_only": |- true "effective_stopped": "description": |- Whether the instance is stopped. + This is an output only field that contains the value computed from the input field combined with + server side defaults. Use the field without the effective_ prefix to set the value. "x-databricks-field-behaviors_output_only": |- true "effective_usage_policy_id": "description": |- The policy that is applied to the instance. + This is an output only field that contains the value computed from the input field combined with + server side defaults. Use the field without the effective_ prefix to set the value. "x-databricks-field-behaviors_output_only": |- true "enable_pg_native_login": @@ -990,11 +1012,15 @@ github.com/databricks/cli/bundle/config/resources.SyncedDatabaseTable: "description": |- The name of the database instance that this table is registered to. This field is always returned, and for tables inside database catalogs is inferred database instance associated with the catalog. + This is an output only field that contains the value computed from the input field combined with + server side defaults. Use the field without the effective_ prefix to set the value. "x-databricks-field-behaviors_output_only": |- true "effective_logical_database_name": "description": |- The name of the logical database that this table is registered to. + This is an output only field that contains the value computed from the input field combined with + server side defaults. Use the field without the effective_ prefix to set the value. "x-databricks-field-behaviors_output_only": |- true "logical_database_name": @@ -1790,6 +1816,9 @@ github.com/databricks/databricks-sdk-go/service/compute.ClusterSpec: The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster uses the instance pool with id (instance_pool_id) if the driver pool is not assigned. + "driver_node_type_flexibility": + "description": |- + Flexible node type configuration for the driver node. "driver_node_type_id": "description": |- The node type of the Spark driver. @@ -1904,6 +1933,9 @@ github.com/databricks/databricks-sdk-go/service/compute.ClusterSpec: This field can only be used when `kind = CLASSIC_PREVIEW`. `effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not. + "worker_node_type_flexibility": + "description": |- + Flexible node type configuration for worker nodes. "workload_type": "description": |- Cluster Attributes showing for clusters workload types. @@ -2164,6 +2196,13 @@ github.com/databricks/databricks-sdk-go/service/compute.MavenLibrary: "description": |- Maven repo to install the Maven package from. If omitted, both Maven Central Repository and Spark Packages are searched. +github.com/databricks/databricks-sdk-go/service/compute.NodeTypeFlexibility: + "_": + "description": |- + Configuration for flexible node types, allowing fallback to alternate node types during cluster launch and upscale. + "alternate_node_type_ids": + "description": |- + A list of node type IDs to use as fallbacks when the primary node type is unavailable. 
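# --- Editor's note: illustrative sketch, not part of the generated patch or the annotations file ---
# The NodeTypeFlexibility annotation above corresponds to the new
# driver_node_type_flexibility / worker_node_type_flexibility fields on cluster specs.
# The standalone snippet below shows roughly how they nest; the node type IDs are
# placeholder values chosen for illustration, and other required cluster fields are omitted.
example_cluster_spec:
  node_type_id: i3.xlarge
  worker_node_type_flexibility:
    # Fallback node types tried when the primary worker node type is unavailable.
    alternate_node_type_ids:
      - i3.2xlarge
      - r5.xlarge
  driver_node_type_flexibility:
    alternate_node_type_ids:
      - i3.2xlarge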
github.com/databricks/databricks-sdk-go/service/compute.PythonPyPiLibrary: "package": "description": |- @@ -2287,6 +2326,8 @@ github.com/databricks/databricks-sdk-go/service/database.DatabaseInstanceRef: instance was created. For a child ref instance, this is the LSN on the instance from which the child instance was created. + This is an output only field that contains the value computed from the input field combined with + server side defaults. Use the field without the effective_ prefix to set the value. "x-databricks-field-behaviors_output_only": |- true "lsn": @@ -2904,6 +2945,7 @@ github.com/databricks/databricks-sdk-go/service/jobs.JobDeployment: The kind of deployment that manages the job. * `BUNDLE`: The job is managed by Databricks Asset Bundle. + * `SYSTEM_MANAGED`: The job is managed by Databricks and is read-only. "metadata_file_path": "description": |- Path of the file that contains deployment metadata. @@ -2911,9 +2953,12 @@ github.com/databricks/databricks-sdk-go/service/jobs.JobDeploymentKind: "_": "description": |- * `BUNDLE`: The job is managed by Databricks Asset Bundle. + * `SYSTEM_MANAGED`: The job is managed by Databricks and is read-only. "enum": - |- BUNDLE + - |- + SYSTEM_MANAGED github.com/databricks/databricks-sdk-go/service/jobs.JobEditMode: "_": "description": |- @@ -3766,6 +3811,18 @@ github.com/databricks/databricks-sdk-go/service/ml.ModelTag: "value": "description": |- The tag value. +github.com/databricks/databricks-sdk-go/service/pipelines.AutoFullRefreshPolicy: + "_": + "description": |- + Policy for auto full refresh. + "enabled": + "description": |- + (Required, Mutable) Whether to enable auto full refresh or not. + "min_interval_hours": + "description": |- + (Optional, Mutable) Specify the minimum interval in hours between the timestamp + at which a table was last full refreshed and the current timestamp for triggering auto full + If unspecified and autoFullRefresh is enabled then by default min_interval_hours is 24 hours. github.com/databricks/databricks-sdk-go/service/pipelines.ConnectionParameters: "source_catalog": "description": |- @@ -3869,6 +3926,9 @@ github.com/databricks/databricks-sdk-go/service/pipelines.IngestionPipelineDefin "connection_name": "description": |- Immutable. The Unity Catalog connection that this ingestion pipeline uses to communicate with the source. This is used with connectors for applications like Salesforce, Workday, and so on. + "full_refresh_window": + "description": |- + (Optional) A window that specifies a set of time ranges for snapshot queries in CDC. "ingest_from_uc_foreign_catalog": "description": |- Immutable. If set to true, the pipeline will ingest tables from the @@ -4025,6 +4085,21 @@ github.com/databricks/databricks-sdk-go/service/pipelines.Notifications: "email_recipients": "description": |- A list of email addresses notified when a configured alert is triggered. +github.com/databricks/databricks-sdk-go/service/pipelines.OperationTimeWindow: + "_": + "description": |- + Proto representing a window + "days_of_week": + "description": |- + Days of week in which the window is allowed to happen + If not specified all days of the week will be used. + "start_hour": + "description": |- + An integer between 0 and 23 denoting the start hour for the window in the 24-hour day. + "time_zone_id": + "description": |- + Time zone id of window. See https://docs.databricks.com/sql/language-manual/sql-ref-syntax-aux-conf-mgmt-set-timezone.html for details. + If not specified, UTC will be used. 
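# --- Editor's note: illustrative sketch, not part of the generated patch or the annotations file ---
# The AutoFullRefreshPolicy and OperationTimeWindow annotations above map to the new
# auto_full_refresh_policy (table configuration) and full_refresh_window
# (ingestion_definition) fields. The standalone snippet below is a minimal example;
# the connection name and values are placeholders, not taken from this patch.
example_ingestion_definition:
  connection_name: my_salesforce_connection
  full_refresh_window:
    # Window for CDC snapshot queries: start hour is required; days default to all days
    # and the time zone defaults to UTC when unset.
    start_hour: 2
    days_of_week:
      - SATURDAY
      - SUNDAY
    time_zone_id: UTC
  table_configuration:
    auto_full_refresh_policy:
      # enabled is required; min_interval_hours defaults to 24 when unspecified.
      enabled: true
      min_interval_hours: 24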
github.com/databricks/databricks-sdk-go/service/pipelines.PathPattern: "include": "description": |- @@ -4313,6 +4388,19 @@ github.com/databricks/databricks-sdk-go/service/pipelines.TableSpec: "description": |- Configuration settings to control the ingestion of tables. These settings override the table_configuration defined in the IngestionPipelineDefinition object and the SchemaSpec. github.com/databricks/databricks-sdk-go/service/pipelines.TableSpecificConfig: + "auto_full_refresh_policy": + "description": |- + (Optional, Mutable) Policy for auto full refresh, if enabled pipeline will automatically try + to fix issues by doing a full refresh on the table in the retry run. auto_full_refresh_policy + in table configuration will override the above level auto_full_refresh_policy. + For example, + { + "auto_full_refresh_policy": { + "enabled": true, + "min_interval_hours": 23, + } + } + If unspecified, auto full refresh is disabled. "exclude_columns": "description": |- A list of column names to be excluded for the ingestion. diff --git a/bundle/internal/validation/generated/enum_fields.go b/bundle/internal/validation/generated/enum_fields.go index ecb042e270..ad2647f478 100644 --- a/bundle/internal/validation/generated/enum_fields.go +++ b/bundle/internal/validation/generated/enum_fields.go @@ -47,7 +47,7 @@ var EnumFields = map[string][]string{ "resources.jobs.*.continuous.pause_status": {"PAUSED", "UNPAUSED"}, "resources.jobs.*.continuous.task_retry_mode": {"NEVER", "ON_FAILURE"}, - "resources.jobs.*.deployment.kind": {"BUNDLE"}, + "resources.jobs.*.deployment.kind": {"BUNDLE", "SYSTEM_MANAGED"}, "resources.jobs.*.edit_mode": {"EDITABLE", "UI_LOCKED"}, "resources.jobs.*.format": {"MULTI_TASK", "SINGLE_TASK"}, "resources.jobs.*.git_source.git_provider": {"awsCodeCommit", "azureDevOpsServices", "bitbucketCloud", "bitbucketServer", "gitHub", "gitHubEnterprise", "gitLab", "gitLabEnterpriseEdition"}, @@ -123,6 +123,7 @@ var EnumFields = map[string][]string{ "resources.pipelines.*.clusters[*].azure_attributes.availability": {"ON_DEMAND_AZURE", "SPOT_AZURE", "SPOT_WITH_FALLBACK_AZURE"}, "resources.pipelines.*.clusters[*].gcp_attributes.availability": {"ON_DEMAND_GCP", "PREEMPTIBLE_GCP", "PREEMPTIBLE_WITH_FALLBACK_GCP"}, "resources.pipelines.*.deployment.kind": {"BUNDLE"}, + "resources.pipelines.*.ingestion_definition.full_refresh_window.days_of_week[*]": {"FRIDAY", "MONDAY", "SATURDAY", "SUNDAY", "THURSDAY", "TUESDAY", "WEDNESDAY"}, "resources.pipelines.*.ingestion_definition.objects[*].report.table_configuration.scd_type": {"APPEND_ONLY", "SCD_TYPE_1", "SCD_TYPE_2"}, "resources.pipelines.*.ingestion_definition.objects[*].schema.table_configuration.scd_type": {"APPEND_ONLY", "SCD_TYPE_1", "SCD_TYPE_2"}, "resources.pipelines.*.ingestion_definition.objects[*].table.table_configuration.scd_type": {"APPEND_ONLY", "SCD_TYPE_1", "SCD_TYPE_2"}, diff --git a/bundle/internal/validation/generated/required_fields.go b/bundle/internal/validation/generated/required_fields.go index df5397fc1f..de86368557 100644 --- a/bundle/internal/validation/generated/required_fields.go +++ b/bundle/internal/validation/generated/required_fields.go @@ -182,25 +182,30 @@ var RequiredFields = map[string][]string{ "resources.models.*": {"name"}, "resources.models.*.permissions[*]": {"level"}, - "resources.pipelines.*.clusters[*].autoscale": {"max_workers", "min_workers"}, - "resources.pipelines.*.clusters[*].cluster_log_conf.dbfs": {"destination"}, - "resources.pipelines.*.clusters[*].cluster_log_conf.s3": {"destination"}, - 
"resources.pipelines.*.clusters[*].cluster_log_conf.volumes": {"destination"}, - "resources.pipelines.*.clusters[*].init_scripts[*].abfss": {"destination"}, - "resources.pipelines.*.clusters[*].init_scripts[*].dbfs": {"destination"}, - "resources.pipelines.*.clusters[*].init_scripts[*].file": {"destination"}, - "resources.pipelines.*.clusters[*].init_scripts[*].gcs": {"destination"}, - "resources.pipelines.*.clusters[*].init_scripts[*].s3": {"destination"}, - "resources.pipelines.*.clusters[*].init_scripts[*].volumes": {"destination"}, - "resources.pipelines.*.clusters[*].init_scripts[*].workspace": {"destination"}, - "resources.pipelines.*.deployment": {"kind"}, - "resources.pipelines.*.gateway_definition": {"connection_name", "gateway_storage_catalog", "gateway_storage_schema"}, - "resources.pipelines.*.ingestion_definition.objects[*].report": {"destination_catalog", "destination_schema", "source_url"}, - "resources.pipelines.*.ingestion_definition.objects[*].schema": {"destination_catalog", "destination_schema", "source_schema"}, - "resources.pipelines.*.ingestion_definition.objects[*].table": {"destination_catalog", "destination_schema", "source_table"}, - "resources.pipelines.*.libraries[*].maven": {"coordinates"}, - "resources.pipelines.*.permissions[*]": {"level"}, - "resources.pipelines.*.restart_window": {"start_hour"}, + "resources.pipelines.*.clusters[*].autoscale": {"max_workers", "min_workers"}, + "resources.pipelines.*.clusters[*].cluster_log_conf.dbfs": {"destination"}, + "resources.pipelines.*.clusters[*].cluster_log_conf.s3": {"destination"}, + "resources.pipelines.*.clusters[*].cluster_log_conf.volumes": {"destination"}, + "resources.pipelines.*.clusters[*].init_scripts[*].abfss": {"destination"}, + "resources.pipelines.*.clusters[*].init_scripts[*].dbfs": {"destination"}, + "resources.pipelines.*.clusters[*].init_scripts[*].file": {"destination"}, + "resources.pipelines.*.clusters[*].init_scripts[*].gcs": {"destination"}, + "resources.pipelines.*.clusters[*].init_scripts[*].s3": {"destination"}, + "resources.pipelines.*.clusters[*].init_scripts[*].volumes": {"destination"}, + "resources.pipelines.*.clusters[*].init_scripts[*].workspace": {"destination"}, + "resources.pipelines.*.deployment": {"kind"}, + "resources.pipelines.*.gateway_definition": {"connection_name", "gateway_storage_catalog", "gateway_storage_schema"}, + "resources.pipelines.*.ingestion_definition.full_refresh_window": {"start_hour"}, + "resources.pipelines.*.ingestion_definition.objects[*].report": {"destination_catalog", "destination_schema", "source_url"}, + "resources.pipelines.*.ingestion_definition.objects[*].report.table_configuration.auto_full_refresh_policy": {"enabled"}, + "resources.pipelines.*.ingestion_definition.objects[*].schema": {"destination_catalog", "destination_schema", "source_schema"}, + "resources.pipelines.*.ingestion_definition.objects[*].schema.table_configuration.auto_full_refresh_policy": {"enabled"}, + "resources.pipelines.*.ingestion_definition.objects[*].table": {"destination_catalog", "destination_schema", "source_table"}, + "resources.pipelines.*.ingestion_definition.objects[*].table.table_configuration.auto_full_refresh_policy": {"enabled"}, + "resources.pipelines.*.ingestion_definition.table_configuration.auto_full_refresh_policy": {"enabled"}, + "resources.pipelines.*.libraries[*].maven": {"coordinates"}, + "resources.pipelines.*.permissions[*]": {"level"}, + "resources.pipelines.*.restart_window": {"start_hour"}, "resources.quality_monitors.*": {"assets_dir", 
"output_schema_name", "table_name"}, "resources.quality_monitors.*.custom_metrics[*]": {"definition", "input_columns", "name", "output_data_type", "type"}, diff --git a/bundle/schema/jsonschema.json b/bundle/schema/jsonschema.json index b027fbe5e1..af2fa029bd 100644 --- a/bundle/schema/jsonschema.json +++ b/bundle/schema/jsonschema.json @@ -366,6 +366,10 @@ "description": "The optional ID of the instance pool for the driver of the cluster belongs.\nThe pool cluster uses the instance pool with id (instance_pool_id) if the driver pool is not\nassigned.", "$ref": "#/$defs/string" }, + "driver_node_type_flexibility": { + "description": "Flexible node type configuration for the driver node.", + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.NodeTypeFlexibility" + }, "driver_node_type_id": { "description": "The node type of the Spark driver.\nNote that this field is optional; if unset, the driver node type will be set as the same value\nas `node_type_id` defined above.\n\nThis field, along with node_type_id, should not be set if virtual_cluster_size is set.\nIf both driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id and node_type_id take precedence.", "$ref": "#/$defs/string" @@ -451,6 +455,10 @@ "description": "This field can only be used when `kind = CLASSIC_PREVIEW`.\n\n`effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not.", "$ref": "#/$defs/bool" }, + "worker_node_type_flexibility": { + "description": "Flexible node type configuration for worker nodes.", + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.NodeTypeFlexibility" + }, "workload_type": { "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.WorkloadType" } @@ -4043,6 +4051,10 @@ "description": "The optional ID of the instance pool for the driver of the cluster belongs.\nThe pool cluster uses the instance pool with id (instance_pool_id) if the driver pool is not\nassigned.", "$ref": "#/$defs/string" }, + "driver_node_type_flexibility": { + "description": "Flexible node type configuration for the driver node.", + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.NodeTypeFlexibility" + }, "driver_node_type_id": { "description": "The node type of the Spark driver.\nNote that this field is optional; if unset, the driver node type will be set as the same value\nas `node_type_id` defined above.\n\nThis field, along with node_type_id, should not be set if virtual_cluster_size is set.\nIf both driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id and node_type_id take precedence.", "$ref": "#/$defs/string" @@ -4121,6 +4133,10 @@ "description": "This field can only be used when `kind = CLASSIC_PREVIEW`.\n\n`effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not.", "$ref": "#/$defs/bool" }, + "worker_node_type_flexibility": { + "description": "Flexible node type configuration for worker nodes.", + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.NodeTypeFlexibility" + }, "workload_type": { "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.WorkloadType" } @@ -4528,6 +4544,25 @@ } ] }, + "compute.NodeTypeFlexibility": { + "oneOf": [ + { + "type": "object", + "description": "Configuration for flexible node types, allowing fallback to 
alternate node types during cluster launch and upscale.", + "properties": { + "alternate_node_type_ids": { + "description": "A list of node type IDs to use as fallbacks when the primary node type is unavailable.", + "$ref": "#/$defs/slice/string" + } + }, + "additionalProperties": false + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "compute.PythonPyPiLibrary": { "oneOf": [ { @@ -5600,7 +5635,7 @@ "type": "object", "properties": { "kind": { - "description": "The kind of deployment that manages the job.\n\n* `BUNDLE`: The job is managed by Databricks Asset Bundle.", + "description": "The kind of deployment that manages the job.\n\n* `BUNDLE`: The job is managed by Databricks Asset Bundle.\n* `SYSTEM_MANAGED`: The job is managed by Databricks and is read-only.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.JobDeploymentKind" }, "metadata_file_path": { @@ -5623,9 +5658,10 @@ "oneOf": [ { "type": "string", - "description": "* `BUNDLE`: The job is managed by Databricks Asset Bundle.", + "description": "* `BUNDLE`: The job is managed by Databricks Asset Bundle.\n* `SYSTEM_MANAGED`: The job is managed by Databricks and is read-only.", "enum": [ - "BUNDLE" + "BUNDLE", + "SYSTEM_MANAGED" ] }, { @@ -7151,6 +7187,32 @@ } ] }, + "pipelines.AutoFullRefreshPolicy": { + "oneOf": [ + { + "type": "object", + "description": "Policy for auto full refresh.", + "properties": { + "enabled": { + "description": "(Required, Mutable) Whether to enable auto full refresh or not.", + "$ref": "#/$defs/bool" + }, + "min_interval_hours": { + "description": "(Optional, Mutable) Specify the minimum interval in hours between the timestamp\nat which a table was last full refreshed and the current timestamp for triggering auto full\nIf unspecified and autoFullRefresh is enabled then by default min_interval_hours is 24 hours.", + "$ref": "#/$defs/int" + } + }, + "additionalProperties": false, + "required": [ + "enabled" + ] + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "pipelines.ConnectionParameters": { "oneOf": [ { @@ -7376,6 +7438,10 @@ "description": "Immutable. The Unity Catalog connection that this ingestion pipeline uses to communicate with the source. This is used with connectors for applications like Salesforce, Workday, and so on.", "$ref": "#/$defs/string" }, + "full_refresh_window": { + "description": "(Optional) A window that specifies a set of time ranges for snapshot queries in CDC.", + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.OperationTimeWindow" + }, "ingest_from_uc_foreign_catalog": { "description": "Immutable. 
If set to true, the pipeline will ingest tables from the\nUC foreign catalogs directly without the need to specify a UC connection or ingestion gateway.\nThe `source_catalog` fields in objects of IngestionConfig are interpreted as\nthe UC foreign catalogs to ingest from.", "$ref": "#/$defs/bool", @@ -7577,6 +7643,36 @@ } ] }, + "pipelines.OperationTimeWindow": { + "oneOf": [ + { + "type": "object", + "description": "Proto representing a window", + "properties": { + "days_of_week": { + "description": "Days of week in which the window is allowed to happen\nIf not specified all days of the week will be used.", + "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/pipelines.DayOfWeek" + }, + "start_hour": { + "description": "An integer between 0 and 23 denoting the start hour for the window in the 24-hour day.", + "$ref": "#/$defs/int" + }, + "time_zone_id": { + "description": "Time zone id of window. See https://docs.databricks.com/sql/language-manual/sql-ref-syntax-aux-conf-mgmt-set-timezone.html for details.\nIf not specified, UTC will be used.", + "$ref": "#/$defs/string" + } + }, + "additionalProperties": false, + "required": [ + "start_hour" + ] + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "pipelines.PathPattern": { "oneOf": [ { @@ -8104,6 +8200,10 @@ { "type": "object", "properties": { + "auto_full_refresh_policy": { + "description": "(Optional, Mutable) Policy for auto full refresh, if enabled pipeline will automatically try\nto fix issues by doing a full refresh on the table in the retry run. auto_full_refresh_policy\nin table configuration will override the above level auto_full_refresh_policy.\nFor example,\n{\n\"auto_full_refresh_policy\": {\n\"enabled\": true,\n\"min_interval_hours\": 23,\n}\n}\nIf unspecified, auto full refresh is disabled.", + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.AutoFullRefreshPolicy" + }, "exclude_columns": { "description": "A list of column names to be excluded for the ingestion.\nWhen not specified, include_columns fully controls what columns to be ingested.\nWhen specified, all other columns including future ones will be automatically included for ingestion.\nThis field in mutually exclusive with `include_columns`.", "$ref": "#/$defs/slice/string" diff --git a/cmd/account/network-connectivity/network-connectivity.go b/cmd/account/network-connectivity/network-connectivity.go index 7f402af79c..ab60233467 100755 --- a/cmd/account/network-connectivity/network-connectivity.go +++ b/cmd/account/network-connectivity/network-connectivity.go @@ -174,6 +174,7 @@ func newCreatePrivateEndpointRule() *cobra.Command { // TODO: array: domain_names cmd.Flags().StringVar(&createPrivateEndpointRuleReq.PrivateEndpointRule.EndpointService, "endpoint-service", createPrivateEndpointRuleReq.PrivateEndpointRule.EndpointService, `The full target AWS endpoint service name that connects to the destination resources of the private endpoint.`) + cmd.Flags().StringVar(&createPrivateEndpointRuleReq.PrivateEndpointRule.ErrorMessage, "error-message", createPrivateEndpointRuleReq.PrivateEndpointRule.ErrorMessage, ``) cmd.Flags().StringVar(&createPrivateEndpointRuleReq.PrivateEndpointRule.GroupId, "group-id", createPrivateEndpointRuleReq.PrivateEndpointRule.GroupId, `Not used by customer-managed private endpoint services.`) cmd.Flags().StringVar(&createPrivateEndpointRuleReq.PrivateEndpointRule.ResourceId, "resource-id", 
createPrivateEndpointRuleReq.PrivateEndpointRule.ResourceId, `The Azure resource ID of the target resource.`) // TODO: array: resource_names @@ -599,6 +600,7 @@ func newUpdatePrivateEndpointRule() *cobra.Command { // TODO: array: domain_names cmd.Flags().BoolVar(&updatePrivateEndpointRuleReq.PrivateEndpointRule.Enabled, "enabled", updatePrivateEndpointRuleReq.PrivateEndpointRule.Enabled, `Only used by private endpoints towards an AWS S3 service.`) + cmd.Flags().StringVar(&updatePrivateEndpointRuleReq.PrivateEndpointRule.ErrorMessage, "error-message", updatePrivateEndpointRuleReq.PrivateEndpointRule.ErrorMessage, ``) // TODO: array: resource_names cmd.Use = "update-private-endpoint-rule NETWORK_CONNECTIVITY_CONFIG_ID PRIVATE_ENDPOINT_RULE_ID UPDATE_MASK" diff --git a/cmd/account/settings-v2/settings-v2.go b/cmd/account/settings-v2/settings-v2.go index 01dc85c5d7..eaede7cb70 100755 --- a/cmd/account/settings-v2/settings-v2.go +++ b/cmd/account/settings-v2/settings-v2.go @@ -26,8 +26,11 @@ func New() *cobra.Command { // Add methods cmd.AddCommand(newGetPublicAccountSetting()) + cmd.AddCommand(newGetPublicAccountUserPreference()) cmd.AddCommand(newListAccountSettingsMetadata()) + cmd.AddCommand(newListAccountUserPreferencesMetadata()) cmd.AddCommand(newPatchPublicAccountSetting()) + cmd.AddCommand(newPatchPublicAccountUserPreference()) // Apply optional overrides to this command. for _, fn := range cmdOverrides { @@ -92,6 +95,70 @@ func newGetPublicAccountSetting() *cobra.Command { return cmd } +// start get-public-account-user-preference command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getPublicAccountUserPreferenceOverrides []func( + *cobra.Command, + *settingsv2.GetPublicAccountUserPreferenceRequest, +) + +func newGetPublicAccountUserPreference() *cobra.Command { + cmd := &cobra.Command{} + + var getPublicAccountUserPreferenceReq settingsv2.GetPublicAccountUserPreferenceRequest + + cmd.Use = "get-public-account-user-preference USER_ID NAME" + cmd.Short = `Get a user preference.` + cmd.Long = `Get a user preference. + + Get a user preference for a specific user. User preferences are personal + settings that allow individual customization without affecting other users. + See :method:settingsv2/listaccountuserpreferencesmetadata for list of user + preferences available via public APIs. + + Arguments: + USER_ID: User ID of the user whose setting is being retrieved. + NAME: User Setting name.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := cmdctx.AccountClient(ctx) + + getPublicAccountUserPreferenceReq.UserId = args[0] + getPublicAccountUserPreferenceReq.Name = args[1] + + response, err := a.SettingsV2.GetPublicAccountUserPreference(ctx, getPublicAccountUserPreferenceReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range getPublicAccountUserPreferenceOverrides { + fn(cmd, &getPublicAccountUserPreferenceReq) + } + + return cmd +} + // start list-account-settings-metadata command // Slice with functions to override default command behavior. @@ -145,6 +212,69 @@ func newListAccountSettingsMetadata() *cobra.Command { return cmd } +// start list-account-user-preferences-metadata command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listAccountUserPreferencesMetadataOverrides []func( + *cobra.Command, + *settingsv2.ListAccountUserPreferencesMetadataRequest, +) + +func newListAccountUserPreferencesMetadata() *cobra.Command { + cmd := &cobra.Command{} + + var listAccountUserPreferencesMetadataReq settingsv2.ListAccountUserPreferencesMetadataRequest + + cmd.Flags().IntVar(&listAccountUserPreferencesMetadataReq.PageSize, "page-size", listAccountUserPreferencesMetadataReq.PageSize, `The maximum number of settings to return.`) + cmd.Flags().StringVar(&listAccountUserPreferencesMetadataReq.PageToken, "page-token", listAccountUserPreferencesMetadataReq.PageToken, `A page token, received from a previous ListAccountUserPreferencesMetadataRequest call.`) + + cmd.Use = "list-account-user-preferences-metadata USER_ID" + cmd.Short = `List user preferences and their metadata.` + cmd.Long = `List user preferences and their metadata. + + List valid user preferences and their metadata for a specific user. User + preferences are personal settings that allow individual customization without + affecting other users. These settings are available to be referenced via GET + :method:settingsv2/getpublicaccountuserpreference and PATCH + :method:settingsv2/patchpublicaccountuserpreference APIs + + Arguments: + USER_ID: User ID of the user whose settings metadata is being retrieved.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := cmdctx.AccountClient(ctx) + + listAccountUserPreferencesMetadataReq.UserId = args[0] + + response := a.SettingsV2.ListAccountUserPreferencesMetadata(ctx, listAccountUserPreferencesMetadataReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listAccountUserPreferencesMetadataOverrides { + fn(cmd, &listAccountUserPreferencesMetadataReq) + } + + return cmd +} + // start patch-public-account-setting command // Slice with functions to override default command behavior. @@ -189,7 +319,9 @@ func newPatchPublicAccountSetting() *cobra.Command { :method:settingsv2/listaccountsettingsmetadata for list of setting available via public APIs at account level. To determine the correct field to include in a patch request, refer to the type field of the setting returned in the - :method:settingsv2/listaccountsettingsmetadata response.` + :method:settingsv2/listaccountsettingsmetadata response. 
+ + Note: Page refresh is required for changes to take effect in UI.` cmd.Annotations = make(map[string]string) @@ -236,4 +368,93 @@ func newPatchPublicAccountSetting() *cobra.Command { return cmd } +// start patch-public-account-user-preference command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var patchPublicAccountUserPreferenceOverrides []func( + *cobra.Command, + *settingsv2.PatchPublicAccountUserPreferenceRequest, +) + +func newPatchPublicAccountUserPreference() *cobra.Command { + cmd := &cobra.Command{} + + var patchPublicAccountUserPreferenceReq settingsv2.PatchPublicAccountUserPreferenceRequest + patchPublicAccountUserPreferenceReq.Setting = settingsv2.UserPreference{} + var patchPublicAccountUserPreferenceJson flags.JsonFlag + + cmd.Flags().Var(&patchPublicAccountUserPreferenceJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: complex arg: boolean_val + // TODO: complex arg: effective_boolean_val + // TODO: complex arg: effective_string_val + cmd.Flags().StringVar(&patchPublicAccountUserPreferenceReq.Setting.Name, "name", patchPublicAccountUserPreferenceReq.Setting.Name, `Name of the setting.`) + // TODO: complex arg: string_val + cmd.Flags().StringVar(&patchPublicAccountUserPreferenceReq.Setting.UserId, "user-id", patchPublicAccountUserPreferenceReq.Setting.UserId, `User ID of the user.`) + + cmd.Use = "patch-public-account-user-preference USER_ID NAME" + cmd.Short = `Update a user preference.` + cmd.Long = `Update a user preference. + + Update a user preference for a specific user. User preferences are personal + settings that allow individual customization without affecting other users. + See :method:settingsv2/listaccountuserpreferencesmetadata for list of user + preferences available via public APIs. + + Note: Page refresh is required for changes to take effect in UI. + + Arguments: + USER_ID: User ID of the user whose setting is being updated. + NAME: ` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := cmdctx.AccountClient(ctx) + + if cmd.Flags().Changed("json") { + diags := patchPublicAccountUserPreferenceJson.Unmarshal(&patchPublicAccountUserPreferenceReq.Setting) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + patchPublicAccountUserPreferenceReq.UserId = args[0] + patchPublicAccountUserPreferenceReq.Name = args[1] + + response, err := a.SettingsV2.PatchPublicAccountUserPreference(ctx, patchPublicAccountUserPreferenceReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range patchPublicAccountUserPreferenceOverrides { + fn(cmd, &patchPublicAccountUserPreferenceReq) + } + + return cmd +} + // end service AccountSettingsV2 diff --git a/cmd/workspace/apps/apps.go b/cmd/workspace/apps/apps.go index a321b5ff6d..f9d795e28f 100755 --- a/cmd/workspace/apps/apps.go +++ b/cmd/workspace/apps/apps.go @@ -21,8 +21,8 @@ var cmdOverrides []func(*cobra.Command) func New() *cobra.Command { cmd := &cobra.Command{ Use: "apps", - Short: `Apps run directly on a customer’s Databricks instance, integrate with their data, use and extend Databricks services, and enable users to interact through single sign-on.`, - Long: `Apps run directly on a customer’s Databricks instance, integrate with their + Short: `Apps run directly on a customer's Databricks instance, integrate with their data, use and extend Databricks services, and enable users to interact through single sign-on.`, + Long: `Apps run directly on a customer's Databricks instance, integrate with their data, use and extend Databricks services, and enable users to interact through single sign-on.`, GroupID: "apps", diff --git a/cmd/workspace/clusters/clusters.go b/cmd/workspace/clusters/clusters.go index d6f9d5b707..1a6ad8990a 100755 --- a/cmd/workspace/clusters/clusters.go +++ b/cmd/workspace/clusters/clusters.go @@ -215,6 +215,7 @@ func newCreate() *cobra.Command { ]`) // TODO: complex arg: docker_image cmd.Flags().StringVar(&createReq.DriverInstancePoolId, "driver-instance-pool-id", createReq.DriverInstancePoolId, `The optional ID of the instance pool for the driver of the cluster belongs.`) + // TODO: complex arg: driver_node_type_flexibility cmd.Flags().StringVar(&createReq.DriverNodeTypeId, "driver-node-type-id", createReq.DriverNodeTypeId, `The node type of the Spark driver.`) cmd.Flags().BoolVar(&createReq.EnableElasticDisk, "enable-elastic-disk", createReq.EnableElasticDisk, `Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk space when its Spark workers are running low on disk space.`) cmd.Flags().BoolVar(&createReq.EnableLocalDiskEncryption, "enable-local-disk-encryption", createReq.EnableLocalDiskEncryption, `Whether to enable LUKS on cluster VMs' local disks.`) @@ -234,6 +235,7 @@ func newCreate() *cobra.Command { // TODO: array: ssh_public_keys cmd.Flags().IntVar(&createReq.TotalInitialRemoteDiskSize, "total-initial-remote-disk-size", createReq.TotalInitialRemoteDiskSize, `If set, what the total initial volume size (in GB) of the remote disks should be.`) cmd.Flags().BoolVar(&createReq.UseMlRuntime, "use-ml-runtime", createReq.UseMlRuntime, `This field can only be used when kind = CLASSIC_PREVIEW.`) + // TODO: complex arg: worker_node_type_flexibility // TODO: complex arg: workload_type cmd.Use = "create SPARK_VERSION" @@ -490,6 +492,7 @@ func newEdit() *cobra.Command { ]`) // TODO: complex arg: docker_image cmd.Flags().StringVar(&editReq.DriverInstancePoolId, "driver-instance-pool-id", editReq.DriverInstancePoolId, `The optional ID of the instance pool for the driver of the cluster belongs.`) + // TODO: complex arg: driver_node_type_flexibility cmd.Flags().StringVar(&editReq.DriverNodeTypeId, "driver-node-type-id", editReq.DriverNodeTypeId, `The node type of the Spark driver.`) cmd.Flags().BoolVar(&editReq.EnableElasticDisk, "enable-elastic-disk", editReq.EnableElasticDisk, `Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk space when its Spark workers are running low on disk space.`) 
cmd.Flags().BoolVar(&editReq.EnableLocalDiskEncryption, "enable-local-disk-encryption", editReq.EnableLocalDiskEncryption, `Whether to enable LUKS on cluster VMs' local disks.`) @@ -509,6 +512,7 @@ func newEdit() *cobra.Command { // TODO: array: ssh_public_keys cmd.Flags().IntVar(&editReq.TotalInitialRemoteDiskSize, "total-initial-remote-disk-size", editReq.TotalInitialRemoteDiskSize, `If set, what the total initial volume size (in GB) of the remote disks should be.`) cmd.Flags().BoolVar(&editReq.UseMlRuntime, "use-ml-runtime", editReq.UseMlRuntime, `This field can only be used when kind = CLASSIC_PREVIEW.`) + // TODO: complex arg: worker_node_type_flexibility // TODO: complex arg: workload_type cmd.Use = "edit CLUSTER_ID SPARK_VERSION" diff --git a/cmd/workspace/git-credentials/git-credentials.go b/cmd/workspace/git-credentials/git-credentials.go index 5d5989a827..ade3300683 100755 --- a/cmd/workspace/git-credentials/git-credentials.go +++ b/cmd/workspace/git-credentials/git-credentials.go @@ -68,6 +68,7 @@ func newCreate() *cobra.Command { cmd.Flags().BoolVar(&createReq.IsDefaultForProvider, "is-default-for-provider", createReq.IsDefaultForProvider, `if the credential is the default for the given provider.`) cmd.Flags().StringVar(&createReq.Name, "name", createReq.Name, `the name of the git credential, used for identification and ease of lookup.`) cmd.Flags().StringVar(&createReq.PersonalAccessToken, "personal-access-token", createReq.PersonalAccessToken, `The personal access token used to authenticate to the corresponding Git provider.`) + cmd.Flags().Int64Var(&createReq.PrincipalId, "principal-id", createReq.PrincipalId, `The ID of the service principal whose credentials will be modified.`) cmd.Use = "create GIT_PROVIDER" cmd.Short = `Create a credential entry.` @@ -152,6 +153,8 @@ func newDelete() *cobra.Command { var deleteReq workspace.DeleteCredentialsRequest + cmd.Flags().Int64Var(&deleteReq.PrincipalId, "principal-id", deleteReq.PrincipalId, `The ID of the service principal whose credentials will be modified.`) + cmd.Use = "delete CREDENTIAL_ID" cmd.Short = `Delete a credential.` cmd.Long = `Delete a credential. @@ -171,7 +174,7 @@ func newDelete() *cobra.Command { if len(args) == 0 { promptSpinner := cmdio.Spinner(ctx) promptSpinner <- "No CREDENTIAL_ID argument specified. Loading names for Git Credentials drop-down." - names, err := w.GitCredentials.CredentialInfoGitProviderToCredentialIdMap(ctx) + names, err := w.GitCredentials.CredentialInfoGitProviderToCredentialIdMap(ctx, workspace.ListCredentialsRequest{}) close(promptSpinner) if err != nil { return fmt.Errorf("failed to load names for Git Credentials drop-down. Please manually specify required arguments. Original error: %w", err) @@ -223,6 +226,8 @@ func newGet() *cobra.Command { var getReq workspace.GetCredentialsRequest + cmd.Flags().Int64Var(&getReq.PrincipalId, "principal-id", getReq.PrincipalId, `The ID of the service principal whose credentials will be modified.`) + cmd.Use = "get CREDENTIAL_ID" cmd.Short = `Get a credential entry.` cmd.Long = `Get a credential entry. @@ -242,7 +247,7 @@ func newGet() *cobra.Command { if len(args) == 0 { promptSpinner := cmdio.Spinner(ctx) promptSpinner <- "No CREDENTIAL_ID argument specified. Loading names for Git Credentials drop-down." 
- names, err := w.GitCredentials.CredentialInfoGitProviderToCredentialIdMap(ctx) + names, err := w.GitCredentials.CredentialInfoGitProviderToCredentialIdMap(ctx, workspace.ListCredentialsRequest{}) close(promptSpinner) if err != nil { return fmt.Errorf("failed to load names for Git Credentials drop-down. Please manually specify required arguments. Original error: %w", err) @@ -286,25 +291,35 @@ func newGet() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var listOverrides []func( *cobra.Command, + *workspace.ListCredentialsRequest, ) func newList() *cobra.Command { cmd := &cobra.Command{} + var listReq workspace.ListCredentialsRequest + + cmd.Flags().Int64Var(&listReq.PrincipalId, "principal-id", listReq.PrincipalId, `The ID of the service principal whose credentials will be modified.`) + cmd.Use = "list" cmd.Short = `Get Git credentials.` cmd.Long = `Get Git credentials. - Lists the calling user's Git credentials. One credential per user is - supported.` + Lists the calling user's Git credentials.` cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := cmdctx.WorkspaceClient(ctx) - response := w.GitCredentials.List(ctx) + + response := w.GitCredentials.List(ctx, listReq) return cmdio.RenderIterator(ctx, response) } @@ -314,7 +329,7 @@ func newList() *cobra.Command { // Apply optional overrides to this command. for _, fn := range listOverrides { - fn(cmd) + fn(cmd, &listReq) } return cmd @@ -342,6 +357,7 @@ func newUpdate() *cobra.Command { cmd.Flags().BoolVar(&updateReq.IsDefaultForProvider, "is-default-for-provider", updateReq.IsDefaultForProvider, `if the credential is the default for the given provider.`) cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `the name of the git credential, used for identification and ease of lookup.`) cmd.Flags().StringVar(&updateReq.PersonalAccessToken, "personal-access-token", updateReq.PersonalAccessToken, `The personal access token used to authenticate to the corresponding Git provider.`) + cmd.Flags().Int64Var(&updateReq.PrincipalId, "principal-id", updateReq.PrincipalId, `The ID of the service principal whose credentials will be modified.`) cmd.Use = "update CREDENTIAL_ID GIT_PROVIDER" cmd.Short = `Update a credential.` diff --git a/cmd/workspace/instance-pools/instance-pools.go b/cmd/workspace/instance-pools/instance-pools.go index 727e8416ae..15fdee0d83 100755 --- a/cmd/workspace/instance-pools/instance-pools.go +++ b/cmd/workspace/instance-pools/instance-pools.go @@ -88,6 +88,7 @@ func newCreate() *cobra.Command { cmd.Flags().IntVar(&createReq.IdleInstanceAutoterminationMinutes, "idle-instance-autotermination-minutes", createReq.IdleInstanceAutoterminationMinutes, `Automatically terminates the extra instances in the pool cache after they are inactive for this time in minutes if min_idle_instances requirement is already met.`) cmd.Flags().IntVar(&createReq.MaxCapacity, "max-capacity", createReq.MaxCapacity, `Maximum number of outstanding instances to keep in the pool, including both instances used by clusters and idle instances.`) cmd.Flags().IntVar(&createReq.MinIdleInstances, "min-idle-instances", createReq.MinIdleInstances, `Minimum number of idle instances to keep in the instance pool.`) + // TODO: complex arg: 
node_type_flexibility // TODO: array: preloaded_docker_images // TODO: array: preloaded_spark_versions cmd.Flags().IntVar(&createReq.RemoteDiskThroughput, "remote-disk-throughput", createReq.RemoteDiskThroughput, `If set, what the configurable throughput (in Mb/s) for the remote disk is.`) @@ -282,6 +283,7 @@ func newEdit() *cobra.Command { cmd.Flags().IntVar(&editReq.IdleInstanceAutoterminationMinutes, "idle-instance-autotermination-minutes", editReq.IdleInstanceAutoterminationMinutes, `Automatically terminates the extra instances in the pool cache after they are inactive for this time in minutes if min_idle_instances requirement is already met.`) cmd.Flags().IntVar(&editReq.MaxCapacity, "max-capacity", editReq.MaxCapacity, `Maximum number of outstanding instances to keep in the pool, including both instances used by clusters and idle instances.`) cmd.Flags().IntVar(&editReq.MinIdleInstances, "min-idle-instances", editReq.MinIdleInstances, `Minimum number of idle instances to keep in the instance pool.`) + // TODO: complex arg: node_type_flexibility cmd.Flags().IntVar(&editReq.RemoteDiskThroughput, "remote-disk-throughput", editReq.RemoteDiskThroughput, `If set, what the configurable throughput (in Mb/s) for the remote disk is.`) cmd.Flags().IntVar(&editReq.TotalInitialRemoteDiskSize, "total-initial-remote-disk-size", editReq.TotalInitialRemoteDiskSize, `If set, what the total initial volume size (in GB) of the remote disks should be.`) diff --git a/cmd/workspace/jobs/jobs.go b/cmd/workspace/jobs/jobs.go index 803af77df4..a8f94c2d38 100755 --- a/cmd/workspace/jobs/jobs.go +++ b/cmd/workspace/jobs/jobs.go @@ -1578,7 +1578,14 @@ func newSubmit() *cobra.Command { Submit a one-time run. This endpoint allows you to submit a workload directly without creating a job. Runs submitted using this endpoint don’t display in the UI. Use the jobs/runs/get API to check the run state after the job is - submitted.` + submitted. + + **Important:** Jobs submitted using this endpoint are not saved as a job. They + do not show up in the Jobs UI, and do not retry when they fail. Because they + are not saved, Databricks cannot auto-optimize serverless compute in case of + failure. If your job fails, you may want to use classic compute to specify the + compute needs for the job. Alternatively, use the POST /jobs/create and + POST /jobs/run-now endpoints to create and run a saved job.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/postgres/postgres.go b/cmd/workspace/postgres/postgres.go index f8707f2f25..9ee2525bce 100755 --- a/cmd/workspace/postgres/postgres.go +++ b/cmd/workspace/postgres/postgres.go @@ -3,6 +3,7 @@ package postgres import ( + "fmt" "strings" "time" @@ -23,14 +24,32 @@ var cmdOverrides []func(*cobra.Command) func New() *cobra.Command { cmd := &cobra.Command{ Use: "postgres", - Short: `The Postgres API provides access to a Postgres database via REST API or direct SQL.`, - Long: `The Postgres API provides access to a Postgres database via REST API or direct - SQL.`, - GroupID: "postgres", + Short: `Use the Postgres API to create and manage Lakebase Autoscaling Postgres infrastructure, including projects, branches, compute endpoints, and roles.`, + Long: `Use the Postgres API to create and manage Lakebase Autoscaling Postgres + infrastructure, including projects, branches, compute endpoints, and roles. + + This API manages database infrastructure only. To query or modify data, use + the Data API or direct SQL connections. 
+ + **About resource IDs and names** + + Lakebase APIs use hierarchical resource names in API paths to identify + resources, such as + projects/{project_id}/branches/{branch_id}/endpoints/{endpoint_id}. - // This service is being previewed; hide from help output. - Hidden: true, - RunE: root.ReportUnknownSubcommand, + When creating a resource, you may optionally provide the final ID component + (for example, project_id, branch_id, or endpoint_id). If you do not, the + system generates an identifier and uses it as the ID component. + + The name field is output-only and represents the full resource path. Note: + The term *resource name* in this API refers to this full, hierarchical + identifier (for example, projects/{project_id}), not the display_name + field. The display_name is a separate, user-visible label shown in the UI. + + The uid field is a system-generated, immutable identifier intended for + internal reference and should not be used to address or locate resources.`, + GroupID: "postgres", + RunE: root.ReportUnknownSubcommand, } // Add methods @@ -42,6 +61,7 @@ func New() *cobra.Command { cmd.AddCommand(newDeleteEndpoint()) cmd.AddCommand(newDeleteProject()) cmd.AddCommand(newDeleteRole()) + cmd.AddCommand(newGenerateDatabaseCredential()) cmd.AddCommand(newGetBranch()) cmd.AddCommand(newGetEndpoint()) cmd.AddCommand(newGetOperation()) @@ -87,15 +107,16 @@ func newCreateBranch() *cobra.Command { cmd.Flags().Var(&createBranchJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&createBranchReq.BranchId, "branch-id", createBranchReq.BranchId, `The ID to use for the Branch, which will become the final component of the branch's resource name.`) cmd.Flags().StringVar(&createBranchReq.Branch.Name, "name", createBranchReq.Branch.Name, `The resource name of the branch.`) // TODO: complex arg: spec // TODO: complex arg: status - cmd.Use = "create-branch PARENT" + cmd.Use = "create-branch PARENT BRANCH_ID" cmd.Short = `Create a Branch.` cmd.Long = `Create a Branch. + Creates a new database branch in the project. + This is a long-running operation. By default, the command waits for the operation to complete. Use --no-wait to return immediately with the raw operation details. The operation's 'name' field can then be used to poll for @@ -103,12 +124,19 @@ func newCreateBranch() *cobra.Command { Arguments: PARENT: The Project where this Branch will be created. Format: - projects/{project_id}` + projects/{project_id} + BRANCH_ID: The ID to use for the Branch. This becomes the final component of the + branch's resource name. The ID must be 1-63 characters long, start with a + lowercase letter, and contain only lowercase letters, numbers, and hyphens + (RFC 1123). Examples: - With custom ID: staging → name becomes + projects/{project_id}/branches/staging - Without custom ID: system + generates slug → name becomes + projects/{project_id}/branches/br-example-name-x1y2z3a4` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(1) + check := root.ExactArgs(2) return check(cmd, args) } @@ -130,6 +158,7 @@ func newCreateBranch() *cobra.Command { } } createBranchReq.Parent = args[0] + createBranchReq.BranchId = args[1] // Determine which mode to execute based on flags. 
switch { @@ -205,15 +234,16 @@ func newCreateEndpoint() *cobra.Command { cmd.Flags().Var(&createEndpointJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&createEndpointReq.EndpointId, "endpoint-id", createEndpointReq.EndpointId, `The ID to use for the Endpoint, which will become the final component of the endpoint's resource name.`) cmd.Flags().StringVar(&createEndpointReq.Endpoint.Name, "name", createEndpointReq.Endpoint.Name, `The resource name of the endpoint.`) // TODO: complex arg: spec // TODO: complex arg: status - cmd.Use = "create-endpoint PARENT" + cmd.Use = "create-endpoint PARENT ENDPOINT_ID" cmd.Short = `Create an Endpoint.` cmd.Long = `Create an Endpoint. + Creates a new compute endpoint in the branch. + This is a long-running operation. By default, the command waits for the operation to complete. Use --no-wait to return immediately with the raw operation details. The operation's 'name' field can then be used to poll for @@ -221,12 +251,19 @@ func newCreateEndpoint() *cobra.Command { Arguments: PARENT: The Branch where this Endpoint will be created. Format: - projects/{project_id}/branches/{branch_id}` + projects/{project_id}/branches/{branch_id} + ENDPOINT_ID: The ID to use for the Endpoint. This becomes the final component of the + endpoint's resource name. The ID must be 1-63 characters long, start with + a lowercase letter, and contain only lowercase letters, numbers, and + hyphens (RFC 1123). Examples: - With custom ID: primary → name becomes + projects/{project_id}/branches/{branch_id}/endpoints/primary - Without + custom ID: system generates slug → name becomes + projects/{project_id}/branches/{branch_id}/endpoints/ep-example-name-x1y2z3a4` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(1) + check := root.ExactArgs(2) return check(cmd, args) } @@ -248,6 +285,7 @@ func newCreateEndpoint() *cobra.Command { } } createEndpointReq.Parent = args[0] + createEndpointReq.EndpointId = args[1] // Determine which mode to execute based on flags. switch { @@ -323,24 +361,34 @@ func newCreateProject() *cobra.Command { cmd.Flags().Var(&createProjectJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&createProjectReq.ProjectId, "project-id", createProjectReq.ProjectId, `The ID to use for the Project, which will become the final component of the project's resource name.`) cmd.Flags().StringVar(&createProjectReq.Project.Name, "name", createProjectReq.Project.Name, `The resource name of the project.`) // TODO: complex arg: spec // TODO: complex arg: status - cmd.Use = "create-project" + cmd.Use = "create-project PROJECT_ID" cmd.Short = `Create a Project.` cmd.Long = `Create a Project. + Creates a new Lakebase Autoscaling Postgres database project, which contains + branches and compute endpoints. + This is a long-running operation. By default, the command waits for the operation to complete. Use --no-wait to return immediately with the raw operation details. The operation's 'name' field can then be used to poll for - completion using the get-operation command.` + completion using the get-operation command. + + Arguments: + PROJECT_ID: The ID to use for the Project. This becomes the final component of the + project's resource name. The ID must be 1-63 characters long, start with a + lowercase letter, and contain only lowercase letters, numbers, and hyphens + (RFC 1123). 
Examples: - With custom ID: production → name becomes + projects/production - Without custom ID: system generates UUID → name + becomes projects/a7f89b2c-3d4e-5f6g-7h8i-9j0k1l2m3n4o` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(0) + check := root.ExactArgs(1) return check(cmd, args) } @@ -361,6 +409,7 @@ func newCreateProject() *cobra.Command { } } } + createProjectReq.ProjectId = args[0] // Determine which mode to execute based on flags. switch { @@ -444,7 +493,7 @@ func newCreateRole() *cobra.Command { cmd.Short = `Create a postgres role for a branch.` cmd.Long = `Create a postgres role for a branch. - Create a role for a branch. + Creates a new Postgres role in the branch. This is a long-running operation. By default, the command waits for the operation to complete. Use --no-wait to return immediately with the raw @@ -455,10 +504,13 @@ func newCreateRole() *cobra.Command { PARENT: The Branch where this Role is created. Format: projects/{project_id}/branches/{branch_id} ROLE_ID: The ID to use for the Role, which will become the final component of the - branch's resource name. This ID becomes the role in postgres. + role's resource name. This ID becomes the role in Postgres. + + This value should be 4-63 characters, and valid characters are lowercase + letters, numbers, and hyphens, as defined by RFC 1123.` - This value should be 4-63 characters, and only use characters available in - DNS names, as defined by RFC-1123` + // This command is being previewed; hide from help output. + cmd.Hidden = true cmd.Annotations = make(map[string]string) @@ -551,10 +603,23 @@ func newDeleteBranch() *cobra.Command { var deleteBranchReq postgres.DeleteBranchRequest + var deleteBranchSkipWait bool + var deleteBranchTimeout time.Duration + + cmd.Flags().BoolVar(&deleteBranchSkipWait, "no-wait", deleteBranchSkipWait, `do not wait to reach DONE state`) + cmd.Flags().DurationVar(&deleteBranchTimeout, "timeout", 0, `maximum amount of time to reach DONE state`) + cmd.Use = "delete-branch NAME" cmd.Short = `Delete a Branch.` cmd.Long = `Delete a Branch. + Deletes the specified database branch. + + This is a long-running operation. By default, the command waits for the + operation to complete. Use --no-wait to return immediately with the raw + operation details. The operation's 'name' field can then be used to poll for + completion using the get-operation command. + Arguments: NAME: The name of the Branch to delete. Format: projects/{project_id}/branches/{branch_id}` @@ -573,11 +638,43 @@ func newDeleteBranch() *cobra.Command { deleteBranchReq.Name = args[0] - err = w.Postgres.DeleteBranch(ctx, deleteBranchReq) - if err != nil { - return err + // Determine which mode to execute based on flags. + switch { + case deleteBranchSkipWait: + wait, err := w.Postgres.DeleteBranch(ctx, deleteBranchReq) + if err != nil { + return err + } + + // Return operation immediately without waiting. + operation, err := w.Postgres.GetOperation(ctx, postgres.GetOperationRequest{ + Name: wait.Name(), + }) + if err != nil { + return err + } + return cmdio.Render(ctx, operation) + + default: + wait, err := w.Postgres.DeleteBranch(ctx, deleteBranchReq) + if err != nil { + return err + } + + // Show spinner while waiting for completion. + spinner := cmdio.Spinner(ctx) + spinner <- "Waiting for delete-branch to complete..." + + // Wait for completion. 
+ opts := api.WithTimeout(deleteBranchTimeout) + + err = wait.Wait(ctx, opts) + if err != nil { + return err + } + close(spinner) + return nil } - return nil } // Disable completions since they are not applicable. @@ -606,10 +703,23 @@ func newDeleteEndpoint() *cobra.Command { var deleteEndpointReq postgres.DeleteEndpointRequest + var deleteEndpointSkipWait bool + var deleteEndpointTimeout time.Duration + + cmd.Flags().BoolVar(&deleteEndpointSkipWait, "no-wait", deleteEndpointSkipWait, `do not wait to reach DONE state`) + cmd.Flags().DurationVar(&deleteEndpointTimeout, "timeout", 0, `maximum amount of time to reach DONE state`) + cmd.Use = "delete-endpoint NAME" cmd.Short = `Delete an Endpoint.` cmd.Long = `Delete an Endpoint. + Deletes the specified compute endpoint. + + This is a long-running operation. By default, the command waits for the + operation to complete. Use --no-wait to return immediately with the raw + operation details. The operation's 'name' field can then be used to poll for + completion using the get-operation command. + Arguments: NAME: The name of the Endpoint to delete. Format: projects/{project_id}/branches/{branch_id}/endpoints/{endpoint_id}` @@ -628,11 +738,43 @@ func newDeleteEndpoint() *cobra.Command { deleteEndpointReq.Name = args[0] - err = w.Postgres.DeleteEndpoint(ctx, deleteEndpointReq) - if err != nil { - return err + // Determine which mode to execute based on flags. + switch { + case deleteEndpointSkipWait: + wait, err := w.Postgres.DeleteEndpoint(ctx, deleteEndpointReq) + if err != nil { + return err + } + + // Return operation immediately without waiting. + operation, err := w.Postgres.GetOperation(ctx, postgres.GetOperationRequest{ + Name: wait.Name(), + }) + if err != nil { + return err + } + return cmdio.Render(ctx, operation) + + default: + wait, err := w.Postgres.DeleteEndpoint(ctx, deleteEndpointReq) + if err != nil { + return err + } + + // Show spinner while waiting for completion. + spinner := cmdio.Spinner(ctx) + spinner <- "Waiting for delete-endpoint to complete..." + + // Wait for completion. + opts := api.WithTimeout(deleteEndpointTimeout) + + err = wait.Wait(ctx, opts) + if err != nil { + return err + } + close(spinner) + return nil } - return nil } // Disable completions since they are not applicable. @@ -661,10 +803,23 @@ func newDeleteProject() *cobra.Command { var deleteProjectReq postgres.DeleteProjectRequest + var deleteProjectSkipWait bool + var deleteProjectTimeout time.Duration + + cmd.Flags().BoolVar(&deleteProjectSkipWait, "no-wait", deleteProjectSkipWait, `do not wait to reach DONE state`) + cmd.Flags().DurationVar(&deleteProjectTimeout, "timeout", 0, `maximum amount of time to reach DONE state`) + cmd.Use = "delete-project NAME" cmd.Short = `Delete a Project.` cmd.Long = `Delete a Project. + Deletes the specified database project. + + This is a long-running operation. By default, the command waits for the + operation to complete. Use --no-wait to return immediately with the raw + operation details. The operation's 'name' field can then be used to poll for + completion using the get-operation command. + Arguments: NAME: The name of the Project to delete. Format: projects/{project_id}` @@ -682,11 +837,43 @@ func newDeleteProject() *cobra.Command { deleteProjectReq.Name = args[0] - err = w.Postgres.DeleteProject(ctx, deleteProjectReq) - if err != nil { - return err + // Determine which mode to execute based on flags. 
+ switch { + case deleteProjectSkipWait: + wait, err := w.Postgres.DeleteProject(ctx, deleteProjectReq) + if err != nil { + return err + } + + // Return operation immediately without waiting. + operation, err := w.Postgres.GetOperation(ctx, postgres.GetOperationRequest{ + Name: wait.Name(), + }) + if err != nil { + return err + } + return cmdio.Render(ctx, operation) + + default: + wait, err := w.Postgres.DeleteProject(ctx, deleteProjectReq) + if err != nil { + return err + } + + // Show spinner while waiting for completion. + spinner := cmdio.Spinner(ctx) + spinner <- "Waiting for delete-project to complete..." + + // Wait for completion. + opts := api.WithTimeout(deleteProjectTimeout) + + err = wait.Wait(ctx, opts) + if err != nil { + return err + } + close(spinner) + return nil } - return nil } // Disable completions since they are not applicable. @@ -727,7 +914,7 @@ func newDeleteRole() *cobra.Command { cmd.Short = `Delete a postgres role in a branch.` cmd.Long = `Delete a postgres role in a branch. - Delete a role in a branch. + Deletes the specified Postgres role. This is a long-running operation. By default, the command waits for the operation to complete. Use --no-wait to return immediately with the raw @@ -736,7 +923,10 @@ func newDeleteRole() *cobra.Command { Arguments: NAME: The resource name of the postgres role. Format: - projects/{project_id}/branch/{branch_id}/roles/{role_id}` + projects/{project_id}/branches/{branch_id}/roles/{role_id}` + + // This command is being previewed; hide from help output. + cmd.Hidden = true cmd.Annotations = make(map[string]string) @@ -803,6 +993,88 @@ func newDeleteRole() *cobra.Command { return cmd } +// start generate-database-credential command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var generateDatabaseCredentialOverrides []func( + *cobra.Command, + *postgres.GenerateDatabaseCredentialRequest, +) + +func newGenerateDatabaseCredential() *cobra.Command { + cmd := &cobra.Command{} + + var generateDatabaseCredentialReq postgres.GenerateDatabaseCredentialRequest + var generateDatabaseCredentialJson flags.JsonFlag + + cmd.Flags().Var(&generateDatabaseCredentialJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: array: claims + + cmd.Use = "generate-database-credential ENDPOINT" + cmd.Short = `Generate OAuth credentials for a Postgres database.` + cmd.Long = `Generate OAuth credentials for a Postgres database. + + Arguments: + ENDPOINT: This field is not yet supported. The endpoint for which this credential + will be generated. Format: + projects/{project_id}/branches/{branch_id}/endpoints/{endpoint_id}` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'endpoint' in your JSON input") + } + return nil + } + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := generateDatabaseCredentialJson.Unmarshal(&generateDatabaseCredentialReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + if !cmd.Flags().Changed("json") { + generateDatabaseCredentialReq.Endpoint = args[0] + } + + response, err := w.Postgres.GenerateDatabaseCredential(ctx, generateDatabaseCredentialReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range generateDatabaseCredentialOverrides { + fn(cmd, &generateDatabaseCredentialReq) + } + + return cmd +} + // start get-branch command // Slice with functions to override default command behavior. @@ -821,8 +1093,10 @@ func newGetBranch() *cobra.Command { cmd.Short = `Get a Branch.` cmd.Long = `Get a Branch. + Retrieves information about the specified database branch. + Arguments: - NAME: The name of the Branch to retrieve. Format: + NAME: The resource name of the branch to retrieve. Format: projects/{project_id}/branches/{branch_id}` cmd.Annotations = make(map[string]string) @@ -876,8 +1150,11 @@ func newGetEndpoint() *cobra.Command { cmd.Short = `Get an Endpoint.` cmd.Long = `Get an Endpoint. + Retrieves information about the specified compute endpoint, including its + connection details and operational state. + Arguments: - NAME: The name of the Endpoint to retrieve. Format: + NAME: The resource name of the endpoint to retrieve. Format: projects/{project_id}/branches/{branch_id}/endpoints/{endpoint_id}` cmd.Annotations = make(map[string]string) @@ -931,6 +1208,8 @@ func newGetOperation() *cobra.Command { cmd.Short = `Get an Operation.` cmd.Long = `Get an Operation. + Retrieves the status of a long-running operation. + Arguments: NAME: The name of the operation resource.` @@ -985,8 +1264,11 @@ func newGetProject() *cobra.Command { cmd.Short = `Get a Project.` cmd.Long = `Get a Project. + Retrieves information about the specified database project. + Arguments: - NAME: The name of the Project to retrieve. Format: projects/{project_id}` + NAME: The resource name of the project to retrieve. Format: + projects/{project_id}` cmd.Annotations = make(map[string]string) @@ -1039,12 +1321,16 @@ func newGetRole() *cobra.Command { cmd.Short = `Get a postgres role in a branch.` cmd.Long = `Get a postgres role in a branch. - Get a Role. + Retrieves information about the specified Postgres role, including its + authentication method and permissions. Arguments: NAME: The name of the Role to retrieve. Format: projects/{project_id}/branches/{branch_id}/roles/{role_id}` + // This command is being previewed; hide from help output. 
+ cmd.Hidden = true + cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -1093,12 +1379,14 @@ func newListBranches() *cobra.Command { var listBranchesReq postgres.ListBranchesRequest cmd.Flags().IntVar(&listBranchesReq.PageSize, "page-size", listBranchesReq.PageSize, `Upper bound for items returned.`) - cmd.Flags().StringVar(&listBranchesReq.PageToken, "page-token", listBranchesReq.PageToken, `Pagination token to go to the next page of Branches.`) + cmd.Flags().StringVar(&listBranchesReq.PageToken, "page-token", listBranchesReq.PageToken, `Page token from a previous response.`) cmd.Use = "list-branches PARENT" cmd.Short = `List Branches.` cmd.Long = `List Branches. + Returns a paginated list of database branches in the project. + Arguments: PARENT: The Project that owns this collection of branches. Format: projects/{project_id}` @@ -1148,12 +1436,14 @@ func newListEndpoints() *cobra.Command { var listEndpointsReq postgres.ListEndpointsRequest cmd.Flags().IntVar(&listEndpointsReq.PageSize, "page-size", listEndpointsReq.PageSize, `Upper bound for items returned.`) - cmd.Flags().StringVar(&listEndpointsReq.PageToken, "page-token", listEndpointsReq.PageToken, `Pagination token to go to the next page of Endpoints.`) + cmd.Flags().StringVar(&listEndpointsReq.PageToken, "page-token", listEndpointsReq.PageToken, `Page token from a previous response.`) cmd.Use = "list-endpoints PARENT" cmd.Short = `List Endpoints.` cmd.Long = `List Endpoints. + Returns a paginated list of compute endpoints in the branch. + Arguments: PARENT: The Branch that owns this collection of endpoints. Format: projects/{project_id}/branches/{branch_id}` @@ -1203,11 +1493,14 @@ func newListProjects() *cobra.Command { var listProjectsReq postgres.ListProjectsRequest cmd.Flags().IntVar(&listProjectsReq.PageSize, "page-size", listProjectsReq.PageSize, `Upper bound for items returned.`) - cmd.Flags().StringVar(&listProjectsReq.PageToken, "page-token", listProjectsReq.PageToken, `Pagination token to go to the next page of Projects.`) + cmd.Flags().StringVar(&listProjectsReq.PageToken, "page-token", listProjectsReq.PageToken, `Page token from a previous response.`) cmd.Use = "list-projects" cmd.Short = `List Projects.` - cmd.Long = `List Projects.` + cmd.Long = `List Projects. + + Returns a paginated list of database projects in the workspace that the user + has permission to access.` cmd.Annotations = make(map[string]string) @@ -1252,18 +1545,21 @@ func newListRoles() *cobra.Command { var listRolesReq postgres.ListRolesRequest cmd.Flags().IntVar(&listRolesReq.PageSize, "page-size", listRolesReq.PageSize, `Upper bound for items returned.`) - cmd.Flags().StringVar(&listRolesReq.PageToken, "page-token", listRolesReq.PageToken, `Pagination token to go to the next page of Roles.`) + cmd.Flags().StringVar(&listRolesReq.PageToken, "page-token", listRolesReq.PageToken, `Page token from a previous response.`) cmd.Use = "list-roles PARENT" cmd.Short = `List postgres roles in a branch.` cmd.Long = `List postgres roles in a branch. - List Roles. + Returns a paginated list of Postgres roles in the branch. Arguments: PARENT: The Branch that owns this collection of roles. Format: projects/{project_id}/branches/{branch_id}` + // This command is being previewed; hide from help output. 
+ cmd.Hidden = true + cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -1326,14 +1622,17 @@ func newUpdateBranch() *cobra.Command { cmd.Short = `Update a Branch.` cmd.Long = `Update a Branch. + Updates the specified database branch. You can set this branch as the + project's default branch, or protect/unprotect it. + This is a long-running operation. By default, the command waits for the operation to complete. Use --no-wait to return immediately with the raw operation details. The operation's 'name' field can then be used to poll for completion using the get-operation command. Arguments: - NAME: The resource name of the branch. Format: - projects/{project_id}/branches/{branch_id} + NAME: The resource name of the branch. This field is output-only and constructed + by the system. Format: projects/{project_id}/branches/{branch_id} UPDATE_MASK: The list of fields to update. If unspecified, all fields will be updated when possible.` @@ -1449,13 +1748,17 @@ func newUpdateEndpoint() *cobra.Command { cmd.Short = `Update an Endpoint.` cmd.Long = `Update an Endpoint. + Updates the specified compute endpoint. You can update autoscaling limits, + suspend timeout, or enable/disable the compute endpoint. + This is a long-running operation. By default, the command waits for the operation to complete. Use --no-wait to return immediately with the raw operation details. The operation's 'name' field can then be used to poll for completion using the get-operation command. Arguments: - NAME: The resource name of the endpoint. Format: + NAME: The resource name of the endpoint. This field is output-only and + constructed by the system. Format: projects/{project_id}/branches/{branch_id}/endpoints/{endpoint_id} UPDATE_MASK: The list of fields to update. If unspecified, all fields will be updated when possible.` @@ -1572,13 +1875,16 @@ func newUpdateProject() *cobra.Command { cmd.Short = `Update a Project.` cmd.Long = `Update a Project. + Updates the specified database project. + This is a long-running operation. By default, the command waits for the operation to complete. Use --no-wait to return immediately with the raw operation details. The operation's 'name' field can then be used to poll for completion using the get-operation command. Arguments: - NAME: The resource name of the project. Format: projects/{project_id} + NAME: The resource name of the project. This field is output-only and + constructed by the system. Format: projects/{project_id} UPDATE_MASK: The list of fields to update. If unspecified, all fields will be updated when possible.` diff --git a/cmd/workspace/providers/providers.go b/cmd/workspace/providers/providers.go index b1305b6409..6dfd97d9a9 100755 --- a/cmd/workspace/providers/providers.go +++ b/cmd/workspace/providers/providers.go @@ -300,9 +300,11 @@ func newList() *cobra.Command { cmd.Long = `List providers. Gets an array of available authentication providers. The caller must either be - a metastore admin or the owner of the providers. Providers not owned by the - caller are not included in the response. There is no guarantee of a specific - ordering of the elements in the array.` + a metastore admin, have the **USE_PROVIDER** privilege on the providers, or be + the owner of the providers. Providers not owned by the caller and for which + the caller does not have the **USE_PROVIDER** privilege are not included in + the response. 
There is no guarantee of a specific ordering of the elements in + the array.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/quality-monitor-v2/quality-monitor-v2.go b/cmd/workspace/quality-monitor-v2/quality-monitor-v2.go index 6e5bf01111..2264ddf000 100755 --- a/cmd/workspace/quality-monitor-v2/quality-monitor-v2.go +++ b/cmd/workspace/quality-monitor-v2/quality-monitor-v2.go @@ -19,9 +19,11 @@ var cmdOverrides []func(*cobra.Command) func New() *cobra.Command { cmd := &cobra.Command{ - Use: "quality-monitor-v2", - Short: `Manage data quality of UC objects (currently support schema).`, - Long: `Manage data quality of UC objects (currently support schema)`, + Use: "quality-monitor-v2", + Short: `[DEPRECATED] This API is deprecated.`, + Long: `[DEPRECATED] This API is deprecated. Please use the Data Quality Monitoring + API instead (REST: /api/data-quality/v1/monitors). Manage data quality of UC + objects (currently support schema).`, GroupID: "qualitymonitor", RunE: root.ReportUnknownSubcommand, } @@ -60,12 +62,14 @@ func newCreateQualityMonitor() *cobra.Command { cmd.Flags().Var(&createQualityMonitorJson, "json", `either inline JSON string or @path/to/file.json with request body`) // TODO: complex arg: anomaly_detection_config + // TODO: array: validity_check_configurations cmd.Use = "create-quality-monitor OBJECT_TYPE OBJECT_ID" cmd.Short = `Create a quality monitor.` cmd.Long = `Create a quality monitor. - Create a quality monitor on UC object + [DEPRECATED] Create a quality monitor on UC object. Use Data Quality + Monitoring API instead. Arguments: OBJECT_TYPE: The type of the monitored object. Can be one of the following: schema. @@ -146,7 +150,8 @@ func newDeleteQualityMonitor() *cobra.Command { cmd.Short = `Delete a quality monitor.` cmd.Long = `Delete a quality monitor. - Delete a quality monitor on UC object + [DEPRECATED] Delete a quality monitor on UC object. Use Data Quality + Monitoring API instead. Arguments: OBJECT_TYPE: The type of the monitored object. Can be one of the following: schema. @@ -204,7 +209,8 @@ func newGetQualityMonitor() *cobra.Command { cmd.Short = `Read a quality monitor.` cmd.Long = `Read a quality monitor. - Read a quality monitor on UC object + [DEPRECATED] Read a quality monitor on UC object. Use Data Quality Monitoring + API instead. Arguments: OBJECT_TYPE: The type of the monitored object. Can be one of the following: schema. @@ -265,7 +271,8 @@ func newListQualityMonitor() *cobra.Command { cmd.Short = `List quality monitors.` cmd.Long = `List quality monitors. - (Unimplemented) List quality monitors` + [DEPRECATED] (Unimplemented) List quality monitors. Use Data Quality + Monitoring API instead.` cmd.Annotations = make(map[string]string) @@ -314,12 +321,14 @@ func newUpdateQualityMonitor() *cobra.Command { cmd.Flags().Var(&updateQualityMonitorJson, "json", `either inline JSON string or @path/to/file.json with request body`) // TODO: complex arg: anomaly_detection_config + // TODO: array: validity_check_configurations cmd.Use = "update-quality-monitor OBJECT_TYPE OBJECT_ID OBJECT_TYPE OBJECT_ID" cmd.Short = `Update a quality monitor.` cmd.Long = `Update a quality monitor. - (Unimplemented) Update a quality monitor on UC object + [DEPRECATED] (Unimplemented) Update a quality monitor on UC object. Use Data + Quality Monitoring API instead. Arguments: OBJECT_TYPE: The type of the monitored object. Can be one of the following: schema. 
diff --git a/cmd/workspace/quality-monitors/quality-monitors.go b/cmd/workspace/quality-monitors/quality-monitors.go index e7e311eb97..f65359a44e 100755 --- a/cmd/workspace/quality-monitors/quality-monitors.go +++ b/cmd/workspace/quality-monitors/quality-monitors.go @@ -20,8 +20,12 @@ var cmdOverrides []func(*cobra.Command) func New() *cobra.Command { cmd := &cobra.Command{ Use: "quality-monitors", - Short: `A monitor computes and monitors data or model quality metrics for a table over time.`, - Long: `A monitor computes and monitors data or model quality metrics for a table over + Short: `[DEPRECATED] This API is deprecated.`, + Long: `[DEPRECATED] This API is deprecated. Please use the Data Quality Monitors API + instead (REST: /api/data-quality/v1/monitors), which manages both Data + Profiling and Anomaly Detection. + + A monitor computes and monitors data or model quality metrics for a table over time. It generates metrics tables and a dashboard that you can use to monitor table health and set alerts. Most write operations require the user to be the owner of the table (or its parent schema or parent catalog). Viewing the @@ -69,7 +73,8 @@ func newCancelRefresh() *cobra.Command { cmd.Short = `Cancel refresh.` cmd.Long = `Cancel refresh. - Cancels an already-initiated refresh job. + [DEPRECATED] Cancels an already-initiated refresh job. Use Data Quality + Monitors API instead (/api/data-quality/v1/monitors). Arguments: TABLE_NAME: UC table name in format catalog.schema.table_name. table_name is case @@ -150,7 +155,8 @@ func newCreate() *cobra.Command { cmd.Short = `Create a table monitor.` cmd.Long = `Create a table monitor. - Creates a new monitor for the specified table. + [DEPRECATED] Creates a new monitor for the specified table. Use Data Quality + Monitors API instead (/api/data-quality/v1/monitors). The caller must either: 1. be an owner of the table's parent catalog, have **USE_SCHEMA** on the table's parent schema, and have **SELECT** access on the @@ -247,7 +253,8 @@ func newDelete() *cobra.Command { cmd.Short = `Delete a table monitor.` cmd.Long = `Delete a table monitor. - Deletes a monitor for the specified table. + [DEPRECATED] Deletes a monitor for the specified table. Use Data Quality + Monitors API instead (/api/data-quality/v1/monitors). The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the table's parent catalog and be an owner of the table's @@ -316,7 +323,8 @@ func newGet() *cobra.Command { cmd.Short = `Get a table monitor.` cmd.Long = `Get a table monitor. - Gets a monitor for the specified table. + [DEPRECATED] Gets a monitor for the specified table. Use Data Quality Monitors + API instead (/api/data-quality/v1/monitors). The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the table's parent catalog and be an owner of the table's @@ -384,7 +392,9 @@ func newGetRefresh() *cobra.Command { cmd.Short = `Get refresh.` cmd.Long = `Get refresh. - Gets info about a specific monitor refresh using the given refresh ID. + [DEPRECATED] Gets info about a specific monitor refresh using the given + refresh ID. Use Data Quality Monitors API instead + (/api/data-quality/v1/monitors). The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the table's parent catalog and be an owner of the table's @@ -454,8 +464,9 @@ func newListRefreshes() *cobra.Command { cmd.Short = `List refreshes.` cmd.Long = `List refreshes. 
- Gets an array containing the history of the most recent refreshes (up to 25) - for this table. + [DEPRECATED] Gets an array containing the history of the most recent refreshes + (up to 25) for this table. Use Data Quality Monitors API instead + (/api/data-quality/v1/monitors). The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the table's parent catalog and be an owner of the table's @@ -526,7 +537,8 @@ func newRegenerateDashboard() *cobra.Command { cmd.Short = `Regenerate a monitoring dashboard.` cmd.Long = `Regenerate a monitoring dashboard. - Regenerates the monitoring dashboard for the specified table. + [DEPRECATED] Regenerates the monitoring dashboard for the specified table. Use + Data Quality Monitors API instead (/api/data-quality/v1/monitors). The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the table's parent catalog and be an owner of the table's @@ -608,8 +620,9 @@ func newRunRefresh() *cobra.Command { cmd.Short = `Run refresh.` cmd.Long = `Run refresh. - Queues a metric refresh on the monitor for the specified table. The refresh - will execute in the background. + [DEPRECATED] Queues a metric refresh on the monitor for the specified table. + Use Data Quality Monitors API instead (/api/data-quality/v1/monitors). The + refresh will execute in the background. The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the table's parent catalog and be an owner of the table's @@ -690,7 +703,8 @@ func newUpdate() *cobra.Command { cmd.Short = `Update a table monitor.` cmd.Long = `Update a table monitor. - Updates a monitor for the specified table. + [DEPRECATED] Updates a monitor for the specified table. Use Data Quality + Monitors API instead (/api/data-quality/v1/monitors). The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the table's parent catalog and be an owner of the table's diff --git a/cmd/workspace/recipients/recipients.go b/cmd/workspace/recipients/recipients.go index ef58949457..3202a5aad8 100755 --- a/cmd/workspace/recipients/recipients.go +++ b/cmd/workspace/recipients/recipients.go @@ -233,9 +233,9 @@ func newGet() *cobra.Command { cmd.Short = `Get a share recipient.` cmd.Long = `Get a share recipient. - Gets a share recipient from the metastore if: - - * the caller is the owner of the share recipient, or: * is a metastore admin + Gets a share recipient from the metastore. The caller must be one of: * A user + with **USE_RECIPIENT** privilege on the metastore * The owner of the share + recipient * A metastore admin Arguments: NAME: Name of the recipient.` @@ -440,7 +440,8 @@ func newSharePermissions() *cobra.Command { cmd.Long = `Get recipient share permissions. Gets the share permissions for the specified Recipient. The caller must have - the USE_RECIPIENT privilege on the metastore or be the owner of the Recipient. + the **USE_RECIPIENT** privilege on the metastore or be the owner of the + Recipient. 
Arguments: NAME: The name of the Recipient.` diff --git a/cmd/workspace/warehouses/warehouses.go b/cmd/workspace/warehouses/warehouses.go index 4cbacdcc35..07b36c77a5 100755 --- a/cmd/workspace/warehouses/warehouses.go +++ b/cmd/workspace/warehouses/warehouses.go @@ -4,12 +4,14 @@ package warehouses import ( "fmt" + "strings" "time" "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdctx" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/common/types/fieldmask" "github.com/databricks/databricks-sdk-go/service/sql" "github.com/spf13/cobra" ) @@ -31,17 +33,22 @@ func New() *cobra.Command { // Add methods cmd.AddCommand(newCreate()) + cmd.AddCommand(newCreateDefaultWarehouseOverride()) cmd.AddCommand(newDelete()) + cmd.AddCommand(newDeleteDefaultWarehouseOverride()) cmd.AddCommand(newEdit()) cmd.AddCommand(newGet()) + cmd.AddCommand(newGetDefaultWarehouseOverride()) cmd.AddCommand(newGetPermissionLevels()) cmd.AddCommand(newGetPermissions()) cmd.AddCommand(newGetWorkspaceWarehouseConfig()) cmd.AddCommand(newList()) + cmd.AddCommand(newListDefaultWarehouseOverrides()) cmd.AddCommand(newSetPermissions()) cmd.AddCommand(newSetWorkspaceWarehouseConfig()) cmd.AddCommand(newStart()) cmd.AddCommand(newStop()) + cmd.AddCommand(newUpdateDefaultWarehouseOverride()) cmd.AddCommand(newUpdatePermissions()) // Apply optional overrides to this command. @@ -158,6 +165,103 @@ func newCreate() *cobra.Command { return cmd } +// start create-default-warehouse-override command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createDefaultWarehouseOverrideOverrides []func( + *cobra.Command, + *sql.CreateDefaultWarehouseOverrideRequest, +) + +func newCreateDefaultWarehouseOverride() *cobra.Command { + cmd := &cobra.Command{} + + var createDefaultWarehouseOverrideReq sql.CreateDefaultWarehouseOverrideRequest + createDefaultWarehouseOverrideReq.DefaultWarehouseOverride = sql.DefaultWarehouseOverride{} + var createDefaultWarehouseOverrideJson flags.JsonFlag + + cmd.Flags().Var(&createDefaultWarehouseOverrideJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createDefaultWarehouseOverrideReq.DefaultWarehouseOverride.Name, "name", createDefaultWarehouseOverrideReq.DefaultWarehouseOverride.Name, `The resource name of the default warehouse override.`) + cmd.Flags().StringVar(&createDefaultWarehouseOverrideReq.DefaultWarehouseOverride.WarehouseId, "warehouse-id", createDefaultWarehouseOverrideReq.DefaultWarehouseOverride.WarehouseId, `The specific warehouse ID when type is CUSTOM.`) + + cmd.Use = "create-default-warehouse-override DEFAULT_WAREHOUSE_OVERRIDE_ID TYPE" + cmd.Short = `Create default warehouse override.` + cmd.Long = `Create default warehouse override. + + Creates a new default warehouse override for a user. Users can create their + own override. Admins can create overrides for any user. + + Arguments: + DEFAULT_WAREHOUSE_OVERRIDE_ID: Required. The ID to use for the override, which will become the final + component of the override's resource name. Can be a numeric user ID or the + literal string "me" for the current user. + TYPE: The type of override behavior. + Supported values: [CUSTOM, LAST_SELECTED]` + + // This command is being previewed; hide from help output. 
+ cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(1)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, provide only DEFAULT_WAREHOUSE_OVERRIDE_ID as positional arguments. Provide 'type' in your JSON input") + } + return nil + } + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createDefaultWarehouseOverrideJson.Unmarshal(&createDefaultWarehouseOverrideReq.DefaultWarehouseOverride) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + createDefaultWarehouseOverrideReq.DefaultWarehouseOverrideId = args[0] + if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[1], &createDefaultWarehouseOverrideReq.DefaultWarehouseOverride.Type) + if err != nil { + return fmt.Errorf("invalid TYPE: %s", args[1]) + } + + } + + response, err := w.Warehouses.CreateDefaultWarehouseOverride(ctx, createDefaultWarehouseOverrideReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createDefaultWarehouseOverrideOverrides { + fn(cmd, &createDefaultWarehouseOverrideReq) + } + + return cmd +} + // start delete command // Slice with functions to override default command behavior. @@ -226,6 +330,82 @@ func newDelete() *cobra.Command { return cmd } +// start delete-default-warehouse-override command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteDefaultWarehouseOverrideOverrides []func( + *cobra.Command, + *sql.DeleteDefaultWarehouseOverrideRequest, +) + +func newDeleteDefaultWarehouseOverride() *cobra.Command { + cmd := &cobra.Command{} + + var deleteDefaultWarehouseOverrideReq sql.DeleteDefaultWarehouseOverrideRequest + + cmd.Use = "delete-default-warehouse-override NAME" + cmd.Short = `Delete default warehouse override.` + cmd.Long = `Delete default warehouse override. + + Deletes the default warehouse override for a user. Users can delete their own + override. Admins can delete overrides for any user. After deletion, the + workspace default warehouse will be used. + + Arguments: + NAME: Required. The resource name of the default warehouse override to delete. + Format: default-warehouse-overrides/{default_warehouse_override_id} The + default_warehouse_override_id can be a numeric user ID or the literal + string "me" for the current user.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No NAME argument specified. Loading names for Warehouses drop-down." 
+ names, err := w.Warehouses.EndpointInfoNameToIdMap(ctx, sql.ListWarehousesRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Warehouses drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "Required") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have required") + } + deleteDefaultWarehouseOverrideReq.Name = args[0] + + err = w.Warehouses.DeleteDefaultWarehouseOverride(ctx, deleteDefaultWarehouseOverrideReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteDefaultWarehouseOverrideOverrides { + fn(cmd, &deleteDefaultWarehouseOverrideReq) + } + + return cmd +} + // start edit command // Slice with functions to override default command behavior. @@ -416,6 +596,82 @@ func newGet() *cobra.Command { return cmd } +// start get-default-warehouse-override command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getDefaultWarehouseOverrideOverrides []func( + *cobra.Command, + *sql.GetDefaultWarehouseOverrideRequest, +) + +func newGetDefaultWarehouseOverride() *cobra.Command { + cmd := &cobra.Command{} + + var getDefaultWarehouseOverrideReq sql.GetDefaultWarehouseOverrideRequest + + cmd.Use = "get-default-warehouse-override NAME" + cmd.Short = `Get default warehouse override.` + cmd.Long = `Get default warehouse override. + + Returns the default warehouse override for a user. Users can fetch their own + override. Admins can fetch overrides for any user. If no override exists, the + UI will fallback to the workspace default warehouse. + + Arguments: + NAME: Required. The resource name of the default warehouse override to retrieve. + Format: default-warehouse-overrides/{default_warehouse_override_id} The + default_warehouse_override_id can be a numeric user ID or the literal + string "me" for the current user.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No NAME argument specified. Loading names for Warehouses drop-down." + names, err := w.Warehouses.EndpointInfoNameToIdMap(ctx, sql.ListWarehousesRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Warehouses drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "Required") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have required") + } + getDefaultWarehouseOverrideReq.Name = args[0] + + response, err := w.Warehouses.GetDefaultWarehouseOverride(ctx, getDefaultWarehouseOverrideReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. 
+ cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getDefaultWarehouseOverrideOverrides { + fn(cmd, &getDefaultWarehouseOverrideReq) + } + + return cmd +} + // start get-permission-levels command // Slice with functions to override default command behavior. @@ -648,6 +904,61 @@ func newList() *cobra.Command { return cmd } +// start list-default-warehouse-overrides command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listDefaultWarehouseOverridesOverrides []func( + *cobra.Command, + *sql.ListDefaultWarehouseOverridesRequest, +) + +func newListDefaultWarehouseOverrides() *cobra.Command { + cmd := &cobra.Command{} + + var listDefaultWarehouseOverridesReq sql.ListDefaultWarehouseOverridesRequest + + cmd.Flags().IntVar(&listDefaultWarehouseOverridesReq.PageSize, "page-size", listDefaultWarehouseOverridesReq.PageSize, `The maximum number of overrides to return.`) + cmd.Flags().StringVar(&listDefaultWarehouseOverridesReq.PageToken, "page-token", listDefaultWarehouseOverridesReq.PageToken, `A page token, received from a previous ListDefaultWarehouseOverrides call.`) + + cmd.Use = "list-default-warehouse-overrides" + cmd.Short = `List default warehouse overrides.` + cmd.Long = `List default warehouse overrides. + + Lists all default warehouse overrides in the workspace. Only workspace + administrators can list all overrides.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + response := w.Warehouses.ListDefaultWarehouseOverrides(ctx, listDefaultWarehouseOverridesReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listDefaultWarehouseOverridesOverrides { + fn(cmd, &listDefaultWarehouseOverridesReq) + } + + return cmd +} + // start set-permissions command // Slice with functions to override default command behavior. @@ -1000,6 +1311,125 @@ func newStop() *cobra.Command { return cmd } +// start update-default-warehouse-override command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var updateDefaultWarehouseOverrideOverrides []func( + *cobra.Command, + *sql.UpdateDefaultWarehouseOverrideRequest, +) + +func newUpdateDefaultWarehouseOverride() *cobra.Command { + cmd := &cobra.Command{} + + var updateDefaultWarehouseOverrideReq sql.UpdateDefaultWarehouseOverrideRequest + updateDefaultWarehouseOverrideReq.DefaultWarehouseOverride = sql.DefaultWarehouseOverride{} + var updateDefaultWarehouseOverrideJson flags.JsonFlag + + cmd.Flags().Var(&updateDefaultWarehouseOverrideJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().BoolVar(&updateDefaultWarehouseOverrideReq.AllowMissing, "allow-missing", updateDefaultWarehouseOverrideReq.AllowMissing, `If set to true, and the override is not found, a new override will be created.`) + cmd.Flags().StringVar(&updateDefaultWarehouseOverrideReq.DefaultWarehouseOverride.Name, "name", updateDefaultWarehouseOverrideReq.DefaultWarehouseOverride.Name, `The resource name of the default warehouse override.`) + cmd.Flags().StringVar(&updateDefaultWarehouseOverrideReq.DefaultWarehouseOverride.WarehouseId, "warehouse-id", updateDefaultWarehouseOverrideReq.DefaultWarehouseOverride.WarehouseId, `The specific warehouse ID when type is CUSTOM.`) + + cmd.Use = "update-default-warehouse-override NAME UPDATE_MASK TYPE" + cmd.Short = `Update default warehouse override.` + cmd.Long = `Update default warehouse override. + + Updates an existing default warehouse override for a user. Users can update + their own override. Admins can update overrides for any user. + + Arguments: + NAME: The resource name of the default warehouse override. Format: + default-warehouse-overrides/{default_warehouse_override_id} + UPDATE_MASK: Required. Field mask specifying which fields to update. Only the fields + specified in the mask will be updated. Use "*" to update all fields. When + allow_missing is true, this field is ignored and all fields are applied. + TYPE: The type of override behavior. + Supported values: [CUSTOM, LAST_SELECTED]` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(2)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, provide only NAME, UPDATE_MASK as positional arguments. Provide 'type' in your JSON input") + } + return nil + } + return nil + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateDefaultWarehouseOverrideJson.Unmarshal(&updateDefaultWarehouseOverrideReq.DefaultWarehouseOverride) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } else { + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No TYPE argument specified. Loading names for Warehouses drop-down." + names, err := w.Warehouses.EndpointInfoNameToIdMap(ctx, sql.ListWarehousesRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Warehouses drop-down. Please manually specify required arguments. 
Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "The type of override behavior") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have the type of override behavior") + } + updateDefaultWarehouseOverrideReq.Name = args[0] + if args[1] != "" { + updateMaskArray := strings.Split(args[1], ",") + updateDefaultWarehouseOverrideReq.UpdateMask = *fieldmask.New(updateMaskArray) + } + _, err = fmt.Sscan(args[2], &updateDefaultWarehouseOverrideReq.DefaultWarehouseOverride.Type) + if err != nil { + return fmt.Errorf("invalid TYPE: %s", args[2]) + } + + } + + response, err := w.Warehouses.UpdateDefaultWarehouseOverride(ctx, updateDefaultWarehouseOverrideReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateDefaultWarehouseOverrideOverrides { + fn(cmd, &updateDefaultWarehouseOverrideReq) + } + + return cmd +} + // start update-permissions command // Slice with functions to override default command behavior. diff --git a/cmd/workspace/workspace-settings-v2/workspace-settings-v2.go b/cmd/workspace/workspace-settings-v2/workspace-settings-v2.go index 4c0c81c02b..e8e5cdf6fb 100755 --- a/cmd/workspace/workspace-settings-v2/workspace-settings-v2.go +++ b/cmd/workspace/workspace-settings-v2/workspace-settings-v2.go @@ -194,6 +194,8 @@ func newPatchPublicWorkspaceSetting() *cobra.Command { in a patch request, refer to the type field of the setting returned in the :method:settingsv2/listworkspacesettingsmetadata response. + Note: Page refresh is required for changes to take effect in UI. 
+ Arguments: NAME: Name of the setting` diff --git a/go.mod b/go.mod index 3ebce0432f..e369d32466 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/charmbracelet/bubbles v0.21.0 // MIT github.com/charmbracelet/bubbletea v1.3.10 // MIT github.com/charmbracelet/lipgloss v1.1.0 // MIT - github.com/databricks/databricks-sdk-go v0.96.0 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.99.0 // Apache 2.0 github.com/fatih/color v1.18.0 // MIT github.com/google/uuid v1.6.0 // BSD-3-Clause github.com/gorilla/mux v1.8.1 // BSD 3-Clause diff --git a/go.sum b/go.sum index 91b115f4da..f0112b8bfd 100644 --- a/go.sum +++ b/go.sum @@ -43,8 +43,8 @@ github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZ github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= -github.com/databricks/databricks-sdk-go v0.96.0 h1:tpR3GSwkM3Vd6P9KfYEXAJiKZ1KLJ2T2+J3tF8jxlEk= -github.com/databricks/databricks-sdk-go v0.96.0/go.mod h1:hWoHnHbNLjPKiTm5K/7bcIv3J3Pkgo5x9pPzh8K3RVE= +github.com/databricks/databricks-sdk-go v0.99.0 h1:iVTL8zE49WoKc6jXyDDqAc46BGG5MvzECdLozrAz0Q0= +github.com/databricks/databricks-sdk-go v0.99.0/go.mod h1:hWoHnHbNLjPKiTm5K/7bcIv3J3Pkgo5x9pPzh8K3RVE= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/python/databricks/bundles/jobs/__init__.py b/python/databricks/bundles/jobs/__init__.py index a1f36e2399..25a00982e1 100644 --- a/python/databricks/bundles/jobs/__init__.py +++ b/python/databricks/bundles/jobs/__init__.py @@ -153,6 +153,9 @@ "ModelTriggerConfigurationConditionParam", "ModelTriggerConfigurationDict", "ModelTriggerConfigurationParam", + "NodeTypeFlexibility", + "NodeTypeFlexibilityDict", + "NodeTypeFlexibilityParam", "NotebookTask", "NotebookTaskDict", "NotebookTaskParam", @@ -522,6 +525,11 @@ ModelTriggerConfigurationCondition, ModelTriggerConfigurationConditionParam, ) +from databricks.bundles.jobs._models.node_type_flexibility import ( + NodeTypeFlexibility, + NodeTypeFlexibilityDict, + NodeTypeFlexibilityParam, +) from databricks.bundles.jobs._models.notebook_task import ( NotebookTask, NotebookTaskDict, diff --git a/python/databricks/bundles/jobs/_models/cluster_spec.py b/python/databricks/bundles/jobs/_models/cluster_spec.py index 3d3ae44aa8..a2662c5ca8 100644 --- a/python/databricks/bundles/jobs/_models/cluster_spec.py +++ b/python/databricks/bundles/jobs/_models/cluster_spec.py @@ -38,6 +38,10 @@ InitScriptInfoParam, ) from databricks.bundles.jobs._models.kind import Kind, KindParam +from databricks.bundles.jobs._models.node_type_flexibility import ( + NodeTypeFlexibility, + NodeTypeFlexibilityParam, +) from databricks.bundles.jobs._models.runtime_engine import ( RuntimeEngine, RuntimeEngineParam, @@ -125,6 +129,11 @@ class ClusterSpec: assigned. """ + driver_node_type_flexibility: VariableOrOptional[NodeTypeFlexibility] = None + """ + Flexible node type configuration for the driver node. + """ + driver_node_type_id: VariableOrOptional[str] = None """ The node type of the Spark driver. 
@@ -258,6 +267,11 @@ class ClusterSpec: `effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not. """ + worker_node_type_flexibility: VariableOrOptional[NodeTypeFlexibility] = None + """ + Flexible node type configuration for worker nodes. + """ + workload_type: VariableOrOptional[WorkloadType] = None @classmethod @@ -339,6 +353,11 @@ class ClusterSpecDict(TypedDict, total=False): assigned. """ + driver_node_type_flexibility: VariableOrOptional[NodeTypeFlexibilityParam] + """ + Flexible node type configuration for the driver node. + """ + driver_node_type_id: VariableOrOptional[str] """ The node type of the Spark driver. @@ -472,6 +491,11 @@ class ClusterSpecDict(TypedDict, total=False): `effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not. """ + worker_node_type_flexibility: VariableOrOptional[NodeTypeFlexibilityParam] + """ + Flexible node type configuration for worker nodes. + """ + workload_type: VariableOrOptional[WorkloadTypeParam] diff --git a/python/databricks/bundles/jobs/_models/dashboard_task.py b/python/databricks/bundles/jobs/_models/dashboard_task.py index 6284ca36d3..b42ef0bdd5 100644 --- a/python/databricks/bundles/jobs/_models/dashboard_task.py +++ b/python/databricks/bundles/jobs/_models/dashboard_task.py @@ -4,10 +4,7 @@ from databricks.bundles.core._transform import _transform from databricks.bundles.core._transform_to_json import _transform_to_json_value from databricks.bundles.core._variable import VariableOrOptional -from databricks.bundles.jobs._models.subscription import ( - Subscription, - SubscriptionParam, -) +from databricks.bundles.jobs._models.subscription import Subscription, SubscriptionParam if TYPE_CHECKING: from typing_extensions import Self diff --git a/python/databricks/bundles/jobs/_models/job.py b/python/databricks/bundles/jobs/_models/job.py index e836d4c9a8..2a4c32676f 100644 --- a/python/databricks/bundles/jobs/_models/job.py +++ b/python/databricks/bundles/jobs/_models/job.py @@ -9,10 +9,7 @@ VariableOrList, VariableOrOptional, ) -from databricks.bundles.jobs._models.continuous import ( - Continuous, - ContinuousParam, -) +from databricks.bundles.jobs._models.continuous import Continuous, ContinuousParam from databricks.bundles.jobs._models.cron_schedule import ( CronSchedule, CronScheduleParam, diff --git a/python/databricks/bundles/jobs/_models/node_type_flexibility.py b/python/databricks/bundles/jobs/_models/node_type_flexibility.py new file mode 100644 index 0000000000..1f6dade356 --- /dev/null +++ b/python/databricks/bundles/jobs/_models/node_type_flexibility.py @@ -0,0 +1,40 @@ +from dataclasses import dataclass, field +from typing import TYPE_CHECKING, TypedDict + +from databricks.bundles.core._transform import _transform +from databricks.bundles.core._transform_to_json import _transform_to_json_value +from databricks.bundles.core._variable import VariableOrList + +if TYPE_CHECKING: + from typing_extensions import Self + + +@dataclass(kw_only=True) +class NodeTypeFlexibility: + """ + Configuration for flexible node types, allowing fallback to alternate node types during cluster launch and upscale. + """ + + alternate_node_type_ids: VariableOrList[str] = field(default_factory=list) + """ + A list of node type IDs to use as fallbacks when the primary node type is unavailable. 
+ """ + + @classmethod + def from_dict(cls, value: "NodeTypeFlexibilityDict") -> "Self": + return _transform(cls, value) + + def as_dict(self) -> "NodeTypeFlexibilityDict": + return _transform_to_json_value(self) # type:ignore + + +class NodeTypeFlexibilityDict(TypedDict, total=False): + """""" + + alternate_node_type_ids: VariableOrList[str] + """ + A list of node type IDs to use as fallbacks when the primary node type is unavailable. + """ + + +NodeTypeFlexibilityParam = NodeTypeFlexibilityDict | NodeTypeFlexibility diff --git a/python/databricks/bundles/pipelines/__init__.py b/python/databricks/bundles/pipelines/__init__.py index cadfc3e87b..2aef912fa7 100644 --- a/python/databricks/bundles/pipelines/__init__.py +++ b/python/databricks/bundles/pipelines/__init__.py @@ -2,6 +2,9 @@ "Adlsgen2Info", "Adlsgen2InfoDict", "Adlsgen2InfoParam", + "AutoFullRefreshPolicy", + "AutoFullRefreshPolicyDict", + "AutoFullRefreshPolicyParam", "AwsAttributes", "AwsAttributesDict", "AwsAttributesParam", @@ -81,6 +84,9 @@ "Notifications", "NotificationsDict", "NotificationsParam", + "OperationTimeWindow", + "OperationTimeWindowDict", + "OperationTimeWindowParam", "PathPattern", "PathPatternDict", "PathPatternParam", @@ -155,6 +161,11 @@ Adlsgen2InfoDict, Adlsgen2InfoParam, ) +from databricks.bundles.pipelines._models.auto_full_refresh_policy import ( + AutoFullRefreshPolicy, + AutoFullRefreshPolicyDict, + AutoFullRefreshPolicyParam, +) from databricks.bundles.pipelines._models.aws_attributes import ( AwsAttributes, AwsAttributesDict, @@ -287,6 +298,11 @@ NotificationsDict, NotificationsParam, ) +from databricks.bundles.pipelines._models.operation_time_window import ( + OperationTimeWindow, + OperationTimeWindowDict, + OperationTimeWindowParam, +) from databricks.bundles.pipelines._models.path_pattern import ( PathPattern, PathPatternDict, diff --git a/python/databricks/bundles/pipelines/_models/auto_full_refresh_policy.py b/python/databricks/bundles/pipelines/_models/auto_full_refresh_policy.py new file mode 100644 index 0000000000..e8b5b69de3 --- /dev/null +++ b/python/databricks/bundles/pipelines/_models/auto_full_refresh_policy.py @@ -0,0 +1,54 @@ +from dataclasses import dataclass +from typing import TYPE_CHECKING, TypedDict + +from databricks.bundles.core._transform import _transform +from databricks.bundles.core._transform_to_json import _transform_to_json_value +from databricks.bundles.core._variable import VariableOr, VariableOrOptional + +if TYPE_CHECKING: + from typing_extensions import Self + + +@dataclass(kw_only=True) +class AutoFullRefreshPolicy: + """ + Policy for auto full refresh. + """ + + enabled: VariableOr[bool] + """ + (Required, Mutable) Whether to enable auto full refresh or not. + """ + + min_interval_hours: VariableOrOptional[int] = None + """ + (Optional, Mutable) Specify the minimum interval in hours between the timestamp + at which a table was last full refreshed and the current timestamp for triggering auto full + If unspecified and autoFullRefresh is enabled then by default min_interval_hours is 24 hours. + """ + + @classmethod + def from_dict(cls, value: "AutoFullRefreshPolicyDict") -> "Self": + return _transform(cls, value) + + def as_dict(self) -> "AutoFullRefreshPolicyDict": + return _transform_to_json_value(self) # type:ignore + + +class AutoFullRefreshPolicyDict(TypedDict, total=False): + """""" + + enabled: VariableOr[bool] + """ + (Required, Mutable) Whether to enable auto full refresh or not. 
+ """ + + min_interval_hours: VariableOrOptional[int] + """ + (Optional, Mutable) Specify the minimum interval in hours between the timestamp + at which a table was last full refreshed and the current timestamp for triggering auto full + If unspecified and autoFullRefresh is enabled then by default min_interval_hours is 24 hours. + """ + + +AutoFullRefreshPolicyParam = AutoFullRefreshPolicyDict | AutoFullRefreshPolicy diff --git a/python/databricks/bundles/pipelines/_models/day_of_week.py b/python/databricks/bundles/pipelines/_models/day_of_week.py index a685c2b308..648acf7f79 100644 --- a/python/databricks/bundles/pipelines/_models/day_of_week.py +++ b/python/databricks/bundles/pipelines/_models/day_of_week.py @@ -4,8 +4,6 @@ class DayOfWeek(Enum): """ - :meta private: [EXPERIMENTAL] - Days of week in which the window is allowed to happen. If not specified all days of the week will be used. """ diff --git a/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition.py b/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition.py index dc3b447396..5954f21daa 100644 --- a/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition.py +++ b/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition.py @@ -8,6 +8,10 @@ IngestionConfig, IngestionConfigParam, ) +from databricks.bundles.pipelines._models.operation_time_window import ( + OperationTimeWindow, + OperationTimeWindowParam, +) from databricks.bundles.pipelines._models.source_config import ( SourceConfig, SourceConfigParam, @@ -30,6 +34,11 @@ class IngestionPipelineDefinition: Immutable. The Unity Catalog connection that this ingestion pipeline uses to communicate with the source. This is used with connectors for applications like Salesforce, Workday, and so on. """ + full_refresh_window: VariableOrOptional[OperationTimeWindow] = None + """ + (Optional) A window that specifies a set of time ranges for snapshot queries in CDC. + """ + ingest_from_uc_foreign_catalog: VariableOrOptional[bool] = None """ :meta private: [EXPERIMENTAL] @@ -81,6 +90,11 @@ class IngestionPipelineDefinitionDict(TypedDict, total=False): Immutable. The Unity Catalog connection that this ingestion pipeline uses to communicate with the source. This is used with connectors for applications like Salesforce, Workday, and so on. """ + full_refresh_window: VariableOrOptional[OperationTimeWindowParam] + """ + (Optional) A window that specifies a set of time ranges for snapshot queries in CDC. 
+ """ + ingest_from_uc_foreign_catalog: VariableOrOptional[bool] """ :meta private: [EXPERIMENTAL] diff --git a/python/databricks/bundles/pipelines/_models/operation_time_window.py b/python/databricks/bundles/pipelines/_models/operation_time_window.py new file mode 100644 index 0000000000..85720ab6c1 --- /dev/null +++ b/python/databricks/bundles/pipelines/_models/operation_time_window.py @@ -0,0 +1,69 @@ +from dataclasses import dataclass, field +from typing import TYPE_CHECKING, TypedDict + +from databricks.bundles.core._transform import _transform +from databricks.bundles.core._transform_to_json import _transform_to_json_value +from databricks.bundles.core._variable import ( + VariableOr, + VariableOrList, + VariableOrOptional, +) +from databricks.bundles.pipelines._models.day_of_week import DayOfWeek, DayOfWeekParam + +if TYPE_CHECKING: + from typing_extensions import Self + + +@dataclass(kw_only=True) +class OperationTimeWindow: + """ + Proto representing a window + """ + + start_hour: VariableOr[int] + """ + An integer between 0 and 23 denoting the start hour for the window in the 24-hour day. + """ + + days_of_week: VariableOrList[DayOfWeek] = field(default_factory=list) + """ + Days of week in which the window is allowed to happen + If not specified all days of the week will be used. + """ + + time_zone_id: VariableOrOptional[str] = None + """ + Time zone id of window. See https://docs.databricks.com/sql/language-manual/sql-ref-syntax-aux-conf-mgmt-set-timezone.html for details. + If not specified, UTC will be used. + """ + + @classmethod + def from_dict(cls, value: "OperationTimeWindowDict") -> "Self": + return _transform(cls, value) + + def as_dict(self) -> "OperationTimeWindowDict": + return _transform_to_json_value(self) # type:ignore + + +class OperationTimeWindowDict(TypedDict, total=False): + """""" + + start_hour: VariableOr[int] + """ + An integer between 0 and 23 denoting the start hour for the window in the 24-hour day. + """ + + days_of_week: VariableOrList[DayOfWeekParam] + """ + Days of week in which the window is allowed to happen + If not specified all days of the week will be used. + """ + + time_zone_id: VariableOrOptional[str] + """ + Time zone id of window. See https://docs.databricks.com/sql/language-manual/sql-ref-syntax-aux-conf-mgmt-set-timezone.html for details. + If not specified, UTC will be used. 
+ """ + + +OperationTimeWindowParam = OperationTimeWindowDict | OperationTimeWindow diff --git a/python/databricks/bundles/pipelines/_models/table_specific_config.py b/python/databricks/bundles/pipelines/_models/table_specific_config.py index 218a8581e5..aa5ab57602 100644 --- a/python/databricks/bundles/pipelines/_models/table_specific_config.py +++ b/python/databricks/bundles/pipelines/_models/table_specific_config.py @@ -4,6 +4,10 @@ from databricks.bundles.core._transform import _transform from databricks.bundles.core._transform_to_json import _transform_to_json_value from databricks.bundles.core._variable import VariableOrList, VariableOrOptional +from databricks.bundles.pipelines._models.auto_full_refresh_policy import ( + AutoFullRefreshPolicy, + AutoFullRefreshPolicyParam, +) from databricks.bundles.pipelines._models.ingestion_pipeline_definition_table_specific_config_query_based_connector_config import ( IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfig, IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfigParam, @@ -25,6 +29,21 @@ class TableSpecificConfig: """""" + auto_full_refresh_policy: VariableOrOptional[AutoFullRefreshPolicy] = None + """ + (Optional, Mutable) Policy for auto full refresh, if enabled pipeline will automatically try + to fix issues by doing a full refresh on the table in the retry run. auto_full_refresh_policy + in table configuration will override the above level auto_full_refresh_policy. + For example, + { + "auto_full_refresh_policy": { + "enabled": true, + "min_interval_hours": 23, + } + } + If unspecified, auto full refresh is disabled. + """ + exclude_columns: VariableOrList[str] = field(default_factory=list) """ A list of column names to be excluded for the ingestion. @@ -102,6 +121,21 @@ def as_dict(self) -> "TableSpecificConfigDict": class TableSpecificConfigDict(TypedDict, total=False): """""" + auto_full_refresh_policy: VariableOrOptional[AutoFullRefreshPolicyParam] + """ + (Optional, Mutable) Policy for auto full refresh, if enabled pipeline will automatically try + to fix issues by doing a full refresh on the table in the retry run. auto_full_refresh_policy + in table configuration will override the above level auto_full_refresh_policy. + For example, + { + "auto_full_refresh_policy": { + "enabled": true, + "min_interval_hours": 23, + } + } + If unspecified, auto full refresh is disabled. + """ + exclude_columns: VariableOrList[str] """ A list of column names to be excluded for the ingestion. 
From f396890a82c6cd6fb1e3851f427fd3f6f85df01a Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 21 Jan 2026 17:48:30 +0100 Subject: [PATCH 2/4] lint + test output --- acceptance/bundle/refschema/out.fields.txt | 82 ++++++++++++++++++++++ bundle/direct/dresources/cluster.go | 6 ++ 2 files changed, 88 insertions(+) diff --git a/acceptance/bundle/refschema/out.fields.txt b/acceptance/bundle/refschema/out.fields.txt index 52ca8c3802..8b069ea3b4 100644 --- a/acceptance/bundle/refschema/out.fields.txt +++ b/acceptance/bundle/refschema/out.fields.txt @@ -273,6 +273,9 @@ resources.clusters.*.driver.private_ip string REMOTE resources.clusters.*.driver.public_dns string REMOTE resources.clusters.*.driver.start_timestamp int64 REMOTE resources.clusters.*.driver_instance_pool_id string ALL +resources.clusters.*.driver_node_type_flexibility *compute.NodeTypeFlexibility ALL +resources.clusters.*.driver_node_type_flexibility.alternate_node_type_ids []string ALL +resources.clusters.*.driver_node_type_flexibility.alternate_node_type_ids[*] string ALL resources.clusters.*.driver_node_type_id string ALL resources.clusters.*.enable_elastic_disk bool ALL resources.clusters.*.enable_local_disk_encryption bool ALL @@ -391,6 +394,9 @@ resources.clusters.*.spec.docker_image.basic_auth.password string REMOTE resources.clusters.*.spec.docker_image.basic_auth.username string REMOTE resources.clusters.*.spec.docker_image.url string REMOTE resources.clusters.*.spec.driver_instance_pool_id string REMOTE +resources.clusters.*.spec.driver_node_type_flexibility *compute.NodeTypeFlexibility REMOTE +resources.clusters.*.spec.driver_node_type_flexibility.alternate_node_type_ids []string REMOTE +resources.clusters.*.spec.driver_node_type_flexibility.alternate_node_type_ids[*] string REMOTE resources.clusters.*.spec.driver_node_type_id string REMOTE resources.clusters.*.spec.enable_elastic_disk bool REMOTE resources.clusters.*.spec.enable_local_disk_encryption bool REMOTE @@ -442,6 +448,9 @@ resources.clusters.*.spec.ssh_public_keys []string REMOTE resources.clusters.*.spec.ssh_public_keys[*] string REMOTE resources.clusters.*.spec.total_initial_remote_disk_size int REMOTE resources.clusters.*.spec.use_ml_runtime bool REMOTE +resources.clusters.*.spec.worker_node_type_flexibility *compute.NodeTypeFlexibility REMOTE +resources.clusters.*.spec.worker_node_type_flexibility.alternate_node_type_ids []string REMOTE +resources.clusters.*.spec.worker_node_type_flexibility.alternate_node_type_ids[*] string REMOTE resources.clusters.*.spec.workload_type *compute.WorkloadType REMOTE resources.clusters.*.spec.workload_type.clients compute.ClientsTypes REMOTE resources.clusters.*.spec.workload_type.clients.jobs bool REMOTE @@ -460,6 +469,9 @@ resources.clusters.*.termination_reason.type compute.TerminationReasonType REMOT resources.clusters.*.total_initial_remote_disk_size int ALL resources.clusters.*.url string INPUT resources.clusters.*.use_ml_runtime bool ALL +resources.clusters.*.worker_node_type_flexibility *compute.NodeTypeFlexibility ALL +resources.clusters.*.worker_node_type_flexibility.alternate_node_type_ids []string ALL +resources.clusters.*.worker_node_type_flexibility.alternate_node_type_ids[*] string ALL resources.clusters.*.workload_type *compute.WorkloadType ALL resources.clusters.*.workload_type.clients compute.ClientsTypes ALL resources.clusters.*.workload_type.clients.jobs bool ALL @@ -709,6 +721,9 @@ resources.jobs.*.job_clusters[*].new_cluster.docker_image.basic_auth.password st 
resources.jobs.*.job_clusters[*].new_cluster.docker_image.basic_auth.username string INPUT STATE resources.jobs.*.job_clusters[*].new_cluster.docker_image.url string INPUT STATE resources.jobs.*.job_clusters[*].new_cluster.driver_instance_pool_id string INPUT STATE +resources.jobs.*.job_clusters[*].new_cluster.driver_node_type_flexibility *compute.NodeTypeFlexibility INPUT STATE +resources.jobs.*.job_clusters[*].new_cluster.driver_node_type_flexibility.alternate_node_type_ids []string INPUT STATE +resources.jobs.*.job_clusters[*].new_cluster.driver_node_type_flexibility.alternate_node_type_ids[*] string INPUT STATE resources.jobs.*.job_clusters[*].new_cluster.driver_node_type_id string INPUT STATE resources.jobs.*.job_clusters[*].new_cluster.enable_elastic_disk bool INPUT STATE resources.jobs.*.job_clusters[*].new_cluster.enable_local_disk_encryption bool INPUT STATE @@ -760,6 +775,9 @@ resources.jobs.*.job_clusters[*].new_cluster.ssh_public_keys []string INPUT STAT resources.jobs.*.job_clusters[*].new_cluster.ssh_public_keys[*] string INPUT STATE resources.jobs.*.job_clusters[*].new_cluster.total_initial_remote_disk_size int INPUT STATE resources.jobs.*.job_clusters[*].new_cluster.use_ml_runtime bool INPUT STATE +resources.jobs.*.job_clusters[*].new_cluster.worker_node_type_flexibility *compute.NodeTypeFlexibility INPUT STATE +resources.jobs.*.job_clusters[*].new_cluster.worker_node_type_flexibility.alternate_node_type_ids []string INPUT STATE +resources.jobs.*.job_clusters[*].new_cluster.worker_node_type_flexibility.alternate_node_type_ids[*] string INPUT STATE resources.jobs.*.job_clusters[*].new_cluster.workload_type *compute.WorkloadType INPUT STATE resources.jobs.*.job_clusters[*].new_cluster.workload_type.clients compute.ClientsTypes INPUT STATE resources.jobs.*.job_clusters[*].new_cluster.workload_type.clients.jobs bool INPUT STATE @@ -897,6 +915,9 @@ resources.jobs.*.settings.job_clusters[*].new_cluster.docker_image.basic_auth.pa resources.jobs.*.settings.job_clusters[*].new_cluster.docker_image.basic_auth.username string REMOTE resources.jobs.*.settings.job_clusters[*].new_cluster.docker_image.url string REMOTE resources.jobs.*.settings.job_clusters[*].new_cluster.driver_instance_pool_id string REMOTE +resources.jobs.*.settings.job_clusters[*].new_cluster.driver_node_type_flexibility *compute.NodeTypeFlexibility REMOTE +resources.jobs.*.settings.job_clusters[*].new_cluster.driver_node_type_flexibility.alternate_node_type_ids []string REMOTE +resources.jobs.*.settings.job_clusters[*].new_cluster.driver_node_type_flexibility.alternate_node_type_ids[*] string REMOTE resources.jobs.*.settings.job_clusters[*].new_cluster.driver_node_type_id string REMOTE resources.jobs.*.settings.job_clusters[*].new_cluster.enable_elastic_disk bool REMOTE resources.jobs.*.settings.job_clusters[*].new_cluster.enable_local_disk_encryption bool REMOTE @@ -948,6 +969,9 @@ resources.jobs.*.settings.job_clusters[*].new_cluster.ssh_public_keys []string R resources.jobs.*.settings.job_clusters[*].new_cluster.ssh_public_keys[*] string REMOTE resources.jobs.*.settings.job_clusters[*].new_cluster.total_initial_remote_disk_size int REMOTE resources.jobs.*.settings.job_clusters[*].new_cluster.use_ml_runtime bool REMOTE +resources.jobs.*.settings.job_clusters[*].new_cluster.worker_node_type_flexibility *compute.NodeTypeFlexibility REMOTE +resources.jobs.*.settings.job_clusters[*].new_cluster.worker_node_type_flexibility.alternate_node_type_ids []string REMOTE 
+resources.jobs.*.settings.job_clusters[*].new_cluster.worker_node_type_flexibility.alternate_node_type_ids[*] string REMOTE resources.jobs.*.settings.job_clusters[*].new_cluster.workload_type *compute.WorkloadType REMOTE resources.jobs.*.settings.job_clusters[*].new_cluster.workload_type.clients compute.ClientsTypes REMOTE resources.jobs.*.settings.job_clusters[*].new_cluster.workload_type.clients.jobs bool REMOTE @@ -1181,6 +1205,9 @@ resources.jobs.*.settings.tasks[*].for_each_task.task.new_cluster.docker_image.b resources.jobs.*.settings.tasks[*].for_each_task.task.new_cluster.docker_image.basic_auth.username string REMOTE resources.jobs.*.settings.tasks[*].for_each_task.task.new_cluster.docker_image.url string REMOTE resources.jobs.*.settings.tasks[*].for_each_task.task.new_cluster.driver_instance_pool_id string REMOTE +resources.jobs.*.settings.tasks[*].for_each_task.task.new_cluster.driver_node_type_flexibility *compute.NodeTypeFlexibility REMOTE +resources.jobs.*.settings.tasks[*].for_each_task.task.new_cluster.driver_node_type_flexibility.alternate_node_type_ids []string REMOTE +resources.jobs.*.settings.tasks[*].for_each_task.task.new_cluster.driver_node_type_flexibility.alternate_node_type_ids[*] string REMOTE resources.jobs.*.settings.tasks[*].for_each_task.task.new_cluster.driver_node_type_id string REMOTE resources.jobs.*.settings.tasks[*].for_each_task.task.new_cluster.enable_elastic_disk bool REMOTE resources.jobs.*.settings.tasks[*].for_each_task.task.new_cluster.enable_local_disk_encryption bool REMOTE @@ -1232,6 +1259,9 @@ resources.jobs.*.settings.tasks[*].for_each_task.task.new_cluster.ssh_public_key resources.jobs.*.settings.tasks[*].for_each_task.task.new_cluster.ssh_public_keys[*] string REMOTE resources.jobs.*.settings.tasks[*].for_each_task.task.new_cluster.total_initial_remote_disk_size int REMOTE resources.jobs.*.settings.tasks[*].for_each_task.task.new_cluster.use_ml_runtime bool REMOTE +resources.jobs.*.settings.tasks[*].for_each_task.task.new_cluster.worker_node_type_flexibility *compute.NodeTypeFlexibility REMOTE +resources.jobs.*.settings.tasks[*].for_each_task.task.new_cluster.worker_node_type_flexibility.alternate_node_type_ids []string REMOTE +resources.jobs.*.settings.tasks[*].for_each_task.task.new_cluster.worker_node_type_flexibility.alternate_node_type_ids[*] string REMOTE resources.jobs.*.settings.tasks[*].for_each_task.task.new_cluster.workload_type *compute.WorkloadType REMOTE resources.jobs.*.settings.tasks[*].for_each_task.task.new_cluster.workload_type.clients compute.ClientsTypes REMOTE resources.jobs.*.settings.tasks[*].for_each_task.task.new_cluster.workload_type.clients.jobs bool REMOTE @@ -1435,6 +1465,9 @@ resources.jobs.*.settings.tasks[*].new_cluster.docker_image.basic_auth.password resources.jobs.*.settings.tasks[*].new_cluster.docker_image.basic_auth.username string REMOTE resources.jobs.*.settings.tasks[*].new_cluster.docker_image.url string REMOTE resources.jobs.*.settings.tasks[*].new_cluster.driver_instance_pool_id string REMOTE +resources.jobs.*.settings.tasks[*].new_cluster.driver_node_type_flexibility *compute.NodeTypeFlexibility REMOTE +resources.jobs.*.settings.tasks[*].new_cluster.driver_node_type_flexibility.alternate_node_type_ids []string REMOTE +resources.jobs.*.settings.tasks[*].new_cluster.driver_node_type_flexibility.alternate_node_type_ids[*] string REMOTE resources.jobs.*.settings.tasks[*].new_cluster.driver_node_type_id string REMOTE resources.jobs.*.settings.tasks[*].new_cluster.enable_elastic_disk bool REMOTE 
resources.jobs.*.settings.tasks[*].new_cluster.enable_local_disk_encryption bool REMOTE @@ -1486,6 +1519,9 @@ resources.jobs.*.settings.tasks[*].new_cluster.ssh_public_keys []string REMOTE resources.jobs.*.settings.tasks[*].new_cluster.ssh_public_keys[*] string REMOTE resources.jobs.*.settings.tasks[*].new_cluster.total_initial_remote_disk_size int REMOTE resources.jobs.*.settings.tasks[*].new_cluster.use_ml_runtime bool REMOTE +resources.jobs.*.settings.tasks[*].new_cluster.worker_node_type_flexibility *compute.NodeTypeFlexibility REMOTE +resources.jobs.*.settings.tasks[*].new_cluster.worker_node_type_flexibility.alternate_node_type_ids []string REMOTE +resources.jobs.*.settings.tasks[*].new_cluster.worker_node_type_flexibility.alternate_node_type_ids[*] string REMOTE resources.jobs.*.settings.tasks[*].new_cluster.workload_type *compute.WorkloadType REMOTE resources.jobs.*.settings.tasks[*].new_cluster.workload_type.clients compute.ClientsTypes REMOTE resources.jobs.*.settings.tasks[*].new_cluster.workload_type.clients.jobs bool REMOTE @@ -1853,6 +1889,9 @@ resources.jobs.*.tasks[*].for_each_task.task.new_cluster.docker_image.basic_auth resources.jobs.*.tasks[*].for_each_task.task.new_cluster.docker_image.basic_auth.username string INPUT STATE resources.jobs.*.tasks[*].for_each_task.task.new_cluster.docker_image.url string INPUT STATE resources.jobs.*.tasks[*].for_each_task.task.new_cluster.driver_instance_pool_id string INPUT STATE +resources.jobs.*.tasks[*].for_each_task.task.new_cluster.driver_node_type_flexibility *compute.NodeTypeFlexibility INPUT STATE +resources.jobs.*.tasks[*].for_each_task.task.new_cluster.driver_node_type_flexibility.alternate_node_type_ids []string INPUT STATE +resources.jobs.*.tasks[*].for_each_task.task.new_cluster.driver_node_type_flexibility.alternate_node_type_ids[*] string INPUT STATE resources.jobs.*.tasks[*].for_each_task.task.new_cluster.driver_node_type_id string INPUT STATE resources.jobs.*.tasks[*].for_each_task.task.new_cluster.enable_elastic_disk bool INPUT STATE resources.jobs.*.tasks[*].for_each_task.task.new_cluster.enable_local_disk_encryption bool INPUT STATE @@ -1904,6 +1943,9 @@ resources.jobs.*.tasks[*].for_each_task.task.new_cluster.ssh_public_keys []strin resources.jobs.*.tasks[*].for_each_task.task.new_cluster.ssh_public_keys[*] string INPUT STATE resources.jobs.*.tasks[*].for_each_task.task.new_cluster.total_initial_remote_disk_size int INPUT STATE resources.jobs.*.tasks[*].for_each_task.task.new_cluster.use_ml_runtime bool INPUT STATE +resources.jobs.*.tasks[*].for_each_task.task.new_cluster.worker_node_type_flexibility *compute.NodeTypeFlexibility INPUT STATE +resources.jobs.*.tasks[*].for_each_task.task.new_cluster.worker_node_type_flexibility.alternate_node_type_ids []string INPUT STATE +resources.jobs.*.tasks[*].for_each_task.task.new_cluster.worker_node_type_flexibility.alternate_node_type_ids[*] string INPUT STATE resources.jobs.*.tasks[*].for_each_task.task.new_cluster.workload_type *compute.WorkloadType INPUT STATE resources.jobs.*.tasks[*].for_each_task.task.new_cluster.workload_type.clients compute.ClientsTypes INPUT STATE resources.jobs.*.tasks[*].for_each_task.task.new_cluster.workload_type.clients.jobs bool INPUT STATE @@ -2107,6 +2149,9 @@ resources.jobs.*.tasks[*].new_cluster.docker_image.basic_auth.password string IN resources.jobs.*.tasks[*].new_cluster.docker_image.basic_auth.username string INPUT STATE resources.jobs.*.tasks[*].new_cluster.docker_image.url string INPUT STATE 
resources.jobs.*.tasks[*].new_cluster.driver_instance_pool_id string INPUT STATE +resources.jobs.*.tasks[*].new_cluster.driver_node_type_flexibility *compute.NodeTypeFlexibility INPUT STATE +resources.jobs.*.tasks[*].new_cluster.driver_node_type_flexibility.alternate_node_type_ids []string INPUT STATE +resources.jobs.*.tasks[*].new_cluster.driver_node_type_flexibility.alternate_node_type_ids[*] string INPUT STATE resources.jobs.*.tasks[*].new_cluster.driver_node_type_id string INPUT STATE resources.jobs.*.tasks[*].new_cluster.enable_elastic_disk bool INPUT STATE resources.jobs.*.tasks[*].new_cluster.enable_local_disk_encryption bool INPUT STATE @@ -2158,6 +2203,9 @@ resources.jobs.*.tasks[*].new_cluster.ssh_public_keys []string INPUT STATE resources.jobs.*.tasks[*].new_cluster.ssh_public_keys[*] string INPUT STATE resources.jobs.*.tasks[*].new_cluster.total_initial_remote_disk_size int INPUT STATE resources.jobs.*.tasks[*].new_cluster.use_ml_runtime bool INPUT STATE +resources.jobs.*.tasks[*].new_cluster.worker_node_type_flexibility *compute.NodeTypeFlexibility INPUT STATE +resources.jobs.*.tasks[*].new_cluster.worker_node_type_flexibility.alternate_node_type_ids []string INPUT STATE +resources.jobs.*.tasks[*].new_cluster.worker_node_type_flexibility.alternate_node_type_ids[*] string INPUT STATE resources.jobs.*.tasks[*].new_cluster.workload_type *compute.WorkloadType INPUT STATE resources.jobs.*.tasks[*].new_cluster.workload_type.clients compute.ClientsTypes INPUT STATE resources.jobs.*.tasks[*].new_cluster.workload_type.clients.jobs bool INPUT STATE @@ -2973,6 +3021,11 @@ resources.pipelines.*.health pipelines.GetPipelineResponseHealth REMOTE resources.pipelines.*.id string INPUT STATE resources.pipelines.*.ingestion_definition *pipelines.IngestionPipelineDefinition INPUT STATE resources.pipelines.*.ingestion_definition.connection_name string INPUT STATE +resources.pipelines.*.ingestion_definition.full_refresh_window *pipelines.OperationTimeWindow INPUT STATE +resources.pipelines.*.ingestion_definition.full_refresh_window.days_of_week []pipelines.DayOfWeek INPUT STATE +resources.pipelines.*.ingestion_definition.full_refresh_window.days_of_week[*] pipelines.DayOfWeek INPUT STATE +resources.pipelines.*.ingestion_definition.full_refresh_window.start_hour int INPUT STATE +resources.pipelines.*.ingestion_definition.full_refresh_window.time_zone_id string INPUT STATE resources.pipelines.*.ingestion_definition.ingest_from_uc_foreign_catalog bool INPUT STATE resources.pipelines.*.ingestion_definition.ingestion_gateway_id string INPUT STATE resources.pipelines.*.ingestion_definition.netsuite_jar_path string INPUT STATE @@ -2984,6 +3037,9 @@ resources.pipelines.*.ingestion_definition.objects[*].report.destination_schema resources.pipelines.*.ingestion_definition.objects[*].report.destination_table string INPUT STATE resources.pipelines.*.ingestion_definition.objects[*].report.source_url string INPUT STATE resources.pipelines.*.ingestion_definition.objects[*].report.table_configuration *pipelines.TableSpecificConfig INPUT STATE +resources.pipelines.*.ingestion_definition.objects[*].report.table_configuration.auto_full_refresh_policy *pipelines.AutoFullRefreshPolicy INPUT STATE +resources.pipelines.*.ingestion_definition.objects[*].report.table_configuration.auto_full_refresh_policy.enabled bool INPUT STATE +resources.pipelines.*.ingestion_definition.objects[*].report.table_configuration.auto_full_refresh_policy.min_interval_hours int INPUT STATE 
resources.pipelines.*.ingestion_definition.objects[*].report.table_configuration.exclude_columns []string INPUT STATE resources.pipelines.*.ingestion_definition.objects[*].report.table_configuration.exclude_columns[*] string INPUT STATE resources.pipelines.*.ingestion_definition.objects[*].report.table_configuration.include_columns []string INPUT STATE @@ -3014,6 +3070,9 @@ resources.pipelines.*.ingestion_definition.objects[*].schema.destination_schema resources.pipelines.*.ingestion_definition.objects[*].schema.source_catalog string INPUT STATE resources.pipelines.*.ingestion_definition.objects[*].schema.source_schema string INPUT STATE resources.pipelines.*.ingestion_definition.objects[*].schema.table_configuration *pipelines.TableSpecificConfig INPUT STATE +resources.pipelines.*.ingestion_definition.objects[*].schema.table_configuration.auto_full_refresh_policy *pipelines.AutoFullRefreshPolicy INPUT STATE +resources.pipelines.*.ingestion_definition.objects[*].schema.table_configuration.auto_full_refresh_policy.enabled bool INPUT STATE +resources.pipelines.*.ingestion_definition.objects[*].schema.table_configuration.auto_full_refresh_policy.min_interval_hours int INPUT STATE resources.pipelines.*.ingestion_definition.objects[*].schema.table_configuration.exclude_columns []string INPUT STATE resources.pipelines.*.ingestion_definition.objects[*].schema.table_configuration.exclude_columns[*] string INPUT STATE resources.pipelines.*.ingestion_definition.objects[*].schema.table_configuration.include_columns []string INPUT STATE @@ -3046,6 +3105,9 @@ resources.pipelines.*.ingestion_definition.objects[*].table.source_catalog strin resources.pipelines.*.ingestion_definition.objects[*].table.source_schema string INPUT STATE resources.pipelines.*.ingestion_definition.objects[*].table.source_table string INPUT STATE resources.pipelines.*.ingestion_definition.objects[*].table.table_configuration *pipelines.TableSpecificConfig INPUT STATE +resources.pipelines.*.ingestion_definition.objects[*].table.table_configuration.auto_full_refresh_policy *pipelines.AutoFullRefreshPolicy INPUT STATE +resources.pipelines.*.ingestion_definition.objects[*].table.table_configuration.auto_full_refresh_policy.enabled bool INPUT STATE +resources.pipelines.*.ingestion_definition.objects[*].table.table_configuration.auto_full_refresh_policy.min_interval_hours int INPUT STATE resources.pipelines.*.ingestion_definition.objects[*].table.table_configuration.exclude_columns []string INPUT STATE resources.pipelines.*.ingestion_definition.objects[*].table.table_configuration.exclude_columns[*] string INPUT STATE resources.pipelines.*.ingestion_definition.objects[*].table.table_configuration.include_columns []string INPUT STATE @@ -3080,6 +3142,9 @@ resources.pipelines.*.ingestion_definition.source_configurations[*].catalog.post resources.pipelines.*.ingestion_definition.source_configurations[*].catalog.source_catalog string INPUT STATE resources.pipelines.*.ingestion_definition.source_type pipelines.IngestionSourceType INPUT STATE resources.pipelines.*.ingestion_definition.table_configuration *pipelines.TableSpecificConfig INPUT STATE +resources.pipelines.*.ingestion_definition.table_configuration.auto_full_refresh_policy *pipelines.AutoFullRefreshPolicy INPUT STATE +resources.pipelines.*.ingestion_definition.table_configuration.auto_full_refresh_policy.enabled bool INPUT STATE +resources.pipelines.*.ingestion_definition.table_configuration.auto_full_refresh_policy.min_interval_hours int INPUT STATE 
resources.pipelines.*.ingestion_definition.table_configuration.exclude_columns []string INPUT STATE resources.pipelines.*.ingestion_definition.table_configuration.exclude_columns[*] string INPUT STATE resources.pipelines.*.ingestion_definition.table_configuration.include_columns []string INPUT STATE @@ -3274,6 +3339,11 @@ resources.pipelines.*.spec.gateway_definition.gateway_storage_schema string REMO resources.pipelines.*.spec.id string REMOTE resources.pipelines.*.spec.ingestion_definition *pipelines.IngestionPipelineDefinition REMOTE resources.pipelines.*.spec.ingestion_definition.connection_name string REMOTE +resources.pipelines.*.spec.ingestion_definition.full_refresh_window *pipelines.OperationTimeWindow REMOTE +resources.pipelines.*.spec.ingestion_definition.full_refresh_window.days_of_week []pipelines.DayOfWeek REMOTE +resources.pipelines.*.spec.ingestion_definition.full_refresh_window.days_of_week[*] pipelines.DayOfWeek REMOTE +resources.pipelines.*.spec.ingestion_definition.full_refresh_window.start_hour int REMOTE +resources.pipelines.*.spec.ingestion_definition.full_refresh_window.time_zone_id string REMOTE resources.pipelines.*.spec.ingestion_definition.ingest_from_uc_foreign_catalog bool REMOTE resources.pipelines.*.spec.ingestion_definition.ingestion_gateway_id string REMOTE resources.pipelines.*.spec.ingestion_definition.netsuite_jar_path string REMOTE @@ -3285,6 +3355,9 @@ resources.pipelines.*.spec.ingestion_definition.objects[*].report.destination_sc resources.pipelines.*.spec.ingestion_definition.objects[*].report.destination_table string REMOTE resources.pipelines.*.spec.ingestion_definition.objects[*].report.source_url string REMOTE resources.pipelines.*.spec.ingestion_definition.objects[*].report.table_configuration *pipelines.TableSpecificConfig REMOTE +resources.pipelines.*.spec.ingestion_definition.objects[*].report.table_configuration.auto_full_refresh_policy *pipelines.AutoFullRefreshPolicy REMOTE +resources.pipelines.*.spec.ingestion_definition.objects[*].report.table_configuration.auto_full_refresh_policy.enabled bool REMOTE +resources.pipelines.*.spec.ingestion_definition.objects[*].report.table_configuration.auto_full_refresh_policy.min_interval_hours int REMOTE resources.pipelines.*.spec.ingestion_definition.objects[*].report.table_configuration.exclude_columns []string REMOTE resources.pipelines.*.spec.ingestion_definition.objects[*].report.table_configuration.exclude_columns[*] string REMOTE resources.pipelines.*.spec.ingestion_definition.objects[*].report.table_configuration.include_columns []string REMOTE @@ -3315,6 +3388,9 @@ resources.pipelines.*.spec.ingestion_definition.objects[*].schema.destination_sc resources.pipelines.*.spec.ingestion_definition.objects[*].schema.source_catalog string REMOTE resources.pipelines.*.spec.ingestion_definition.objects[*].schema.source_schema string REMOTE resources.pipelines.*.spec.ingestion_definition.objects[*].schema.table_configuration *pipelines.TableSpecificConfig REMOTE +resources.pipelines.*.spec.ingestion_definition.objects[*].schema.table_configuration.auto_full_refresh_policy *pipelines.AutoFullRefreshPolicy REMOTE +resources.pipelines.*.spec.ingestion_definition.objects[*].schema.table_configuration.auto_full_refresh_policy.enabled bool REMOTE +resources.pipelines.*.spec.ingestion_definition.objects[*].schema.table_configuration.auto_full_refresh_policy.min_interval_hours int REMOTE resources.pipelines.*.spec.ingestion_definition.objects[*].schema.table_configuration.exclude_columns []string REMOTE 
resources.pipelines.*.spec.ingestion_definition.objects[*].schema.table_configuration.exclude_columns[*] string REMOTE resources.pipelines.*.spec.ingestion_definition.objects[*].schema.table_configuration.include_columns []string REMOTE @@ -3347,6 +3423,9 @@ resources.pipelines.*.spec.ingestion_definition.objects[*].table.source_catalog resources.pipelines.*.spec.ingestion_definition.objects[*].table.source_schema string REMOTE resources.pipelines.*.spec.ingestion_definition.objects[*].table.source_table string REMOTE resources.pipelines.*.spec.ingestion_definition.objects[*].table.table_configuration *pipelines.TableSpecificConfig REMOTE +resources.pipelines.*.spec.ingestion_definition.objects[*].table.table_configuration.auto_full_refresh_policy *pipelines.AutoFullRefreshPolicy REMOTE +resources.pipelines.*.spec.ingestion_definition.objects[*].table.table_configuration.auto_full_refresh_policy.enabled bool REMOTE +resources.pipelines.*.spec.ingestion_definition.objects[*].table.table_configuration.auto_full_refresh_policy.min_interval_hours int REMOTE resources.pipelines.*.spec.ingestion_definition.objects[*].table.table_configuration.exclude_columns []string REMOTE resources.pipelines.*.spec.ingestion_definition.objects[*].table.table_configuration.exclude_columns[*] string REMOTE resources.pipelines.*.spec.ingestion_definition.objects[*].table.table_configuration.include_columns []string REMOTE @@ -3381,6 +3460,9 @@ resources.pipelines.*.spec.ingestion_definition.source_configurations[*].catalog resources.pipelines.*.spec.ingestion_definition.source_configurations[*].catalog.source_catalog string REMOTE resources.pipelines.*.spec.ingestion_definition.source_type pipelines.IngestionSourceType REMOTE resources.pipelines.*.spec.ingestion_definition.table_configuration *pipelines.TableSpecificConfig REMOTE +resources.pipelines.*.spec.ingestion_definition.table_configuration.auto_full_refresh_policy *pipelines.AutoFullRefreshPolicy REMOTE +resources.pipelines.*.spec.ingestion_definition.table_configuration.auto_full_refresh_policy.enabled bool REMOTE +resources.pipelines.*.spec.ingestion_definition.table_configuration.auto_full_refresh_policy.min_interval_hours int REMOTE resources.pipelines.*.spec.ingestion_definition.table_configuration.exclude_columns []string REMOTE resources.pipelines.*.spec.ingestion_definition.table_configuration.exclude_columns[*] string REMOTE resources.pipelines.*.spec.ingestion_definition.table_configuration.include_columns []string REMOTE diff --git a/bundle/direct/dresources/cluster.go b/bundle/direct/dresources/cluster.go index 40c275cec5..4ddec08844 100644 --- a/bundle/direct/dresources/cluster.go +++ b/bundle/direct/dresources/cluster.go @@ -44,6 +44,7 @@ func (r *ResourceCluster) RemapState(input *compute.ClusterDetails) *compute.Clu DockerImage: input.DockerImage, DriverInstancePoolId: input.DriverInstancePoolId, DriverNodeTypeId: input.DriverNodeTypeId, + DriverNodeTypeFlexibility: input.DriverNodeTypeFlexibility, EnableElasticDisk: input.EnableElasticDisk, EnableLocalDiskEncryption: input.EnableLocalDiskEncryption, GcpAttributes: input.GcpAttributes, @@ -64,6 +65,7 @@ func (r *ResourceCluster) RemapState(input *compute.ClusterDetails) *compute.Clu TotalInitialRemoteDiskSize: input.TotalInitialRemoteDiskSize, UseMlRuntime: input.UseMlRuntime, WorkloadType: input.WorkloadType, + WorkerNodeTypeFlexibility: input.WorkerNodeTypeFlexibility, ForceSendFields: utils.FilterFields[compute.ClusterSpec](input.ForceSendFields), } if input.Spec != nil { @@ -159,6 
+161,7 @@ func makeCreateCluster(config *compute.ClusterSpec) compute.CreateCluster { DockerImage: config.DockerImage, DriverInstancePoolId: config.DriverInstancePoolId, DriverNodeTypeId: config.DriverNodeTypeId, + DriverNodeTypeFlexibility: config.DriverNodeTypeFlexibility, EnableElasticDisk: config.EnableElasticDisk, EnableLocalDiskEncryption: config.EnableLocalDiskEncryption, GcpAttributes: config.GcpAttributes, @@ -179,6 +182,7 @@ func makeCreateCluster(config *compute.ClusterSpec) compute.CreateCluster { TotalInitialRemoteDiskSize: config.TotalInitialRemoteDiskSize, UseMlRuntime: config.UseMlRuntime, WorkloadType: config.WorkloadType, + WorkerNodeTypeFlexibility: config.WorkerNodeTypeFlexibility, ForceSendFields: utils.FilterFields[compute.CreateCluster](config.ForceSendFields), } @@ -206,6 +210,7 @@ func makeEditCluster(id string, config *compute.ClusterSpec) compute.EditCluster DockerImage: config.DockerImage, DriverInstancePoolId: config.DriverInstancePoolId, DriverNodeTypeId: config.DriverNodeTypeId, + DriverNodeTypeFlexibility: config.DriverNodeTypeFlexibility, EnableElasticDisk: config.EnableElasticDisk, EnableLocalDiskEncryption: config.EnableLocalDiskEncryption, GcpAttributes: config.GcpAttributes, @@ -226,6 +231,7 @@ func makeEditCluster(id string, config *compute.ClusterSpec) compute.EditCluster TotalInitialRemoteDiskSize: config.TotalInitialRemoteDiskSize, UseMlRuntime: config.UseMlRuntime, WorkloadType: config.WorkloadType, + WorkerNodeTypeFlexibility: config.WorkerNodeTypeFlexibility, ForceSendFields: utils.FilterFields[compute.EditCluster](config.ForceSendFields), } From 30dd35e38914ccd671d8b8cbe49eec9426c6f198 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 21 Jan 2026 18:46:13 +0100 Subject: [PATCH 3/4] fix test output --- acceptance/help/output.txt | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/acceptance/help/output.txt b/acceptance/help/output.txt index 5c4bacc12d..320e313c6d 100644 --- a/acceptance/help/output.txt +++ b/acceptance/help/output.txt @@ -33,7 +33,7 @@ Real-time Serving serving-endpoints The Serving Endpoints API allows you to create, update, and delete model serving endpoints. Apps - apps Apps run directly on a customer’s Databricks instance, integrate with their data, use and extend Databricks services, and enable users to interact through single sign-on. + apps Apps run directly on a customer's Databricks instance, integrate with their data, use and extend Databricks services, and enable users to interact through single sign-on. Vector Search vector-search-endpoints **Endpoint**: Represents the compute resources to host vector search indexes. @@ -81,7 +81,7 @@ Unity Catalog model-versions Databricks provides a hosted version of MLflow Model Registry in Unity Catalog. online-tables Online tables provide lower latency and higher QPS access to data from Delta tables. policies Attribute-Based Access Control (ABAC) provides high leverage governance for enforcing compliance policies in Unity Catalog. - quality-monitors A monitor computes and monitors data or model quality metrics for a table over time. + quality-monitors [DEPRECATED] This API is deprecated. registered-models Databricks provides a hosted version of MLflow Model Registry in Unity Catalog. resource-quotas Unity Catalog enforces resource quotas on all securable objects, which limits the number of resources that can be created. rfa Request for Access enables users to request access for Unity Catalog securables. 
@@ -136,7 +136,7 @@ Clean Rooms clean-rooms A clean room uses Delta Sharing and serverless compute to provide a secure and privacy-protecting environment where multiple parties can work together on sensitive enterprise data without direct access to each other's data. Quality Monitor - quality-monitor-v2 Manage data quality of UC objects (currently support schema). + quality-monitor-v2 [DEPRECATED] This API is deprecated. Data Quality Monitoring data-quality Manage the data quality of Unity Catalog objects (currently support schema and table). @@ -149,6 +149,9 @@ Tags tag-policies The Tag Policy API allows you to manage policies for governed tags in Databricks. workspace-entity-tag-assignments Manage tag assignments on workspace-scoped objects. +Postgres + postgres Use the Postgres API to create and manage Lakebase Autoscaling Postgres infrastructure, including projects, branches, compute endpoints, and roles. + Developer Tools bundle Databricks Asset Bundles let you express data/AI/analytics projects as code. sync Synchronize a local directory to a workspace directory From daa39df57bd04bf3cf22b3c8e542910958409f94 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 21 Jan 2026 20:33:38 +0100 Subject: [PATCH 4/4] revert tagging change --- .github/workflows/tagging.yml | 18 +++--------------- 1 file changed, 3 insertions(+), 15 deletions(-) diff --git a/.github/workflows/tagging.yml b/.github/workflows/tagging.yml index 6ccb26ad52..15c8060dd9 100644 --- a/.github/workflows/tagging.yml +++ b/.github/workflows/tagging.yml @@ -2,14 +2,10 @@ name: tagging on: - # Manual dispatch. workflow_dispatch: - # No inputs are required for the manual dispatch. - - # Runs at 8:00 UTC on Tuesday, Wednesday, and Thursday. To enable automated - # tagging for a repository, simply add it to the if block of the tag job. - schedule: - - cron: '0 8 * * TUE,WED,THU' + # Enable for automatic tagging + #schedule: + # - cron: '0 0 * * TUE' # Ensure that only a single instance of the workflow is running at a time. concurrency: @@ -17,14 +13,6 @@ concurrency: jobs: tag: - # Only run the tag job if the trigger is manual (workflow_dispatch) or - # the repository has been approved for automated releases. - # - # To disable release for a repository, simply exclude it from the if - # condition. - if: >- - github.event_name == 'workflow_dispatch' || - github.repository == 'databricks/databricks-sdk-go' environment: "release-is" runs-on: group: databricks-deco-testing-runner-group
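
The new bundle models added in this series (NodeTypeFlexibility in databricks.bundles.jobs, AutoFullRefreshPolicy and OperationTimeWindow in databricks.bundles.pipelines) can be exercised on their own. Below is a minimal sketch, assuming only the exports shown in the __init__.py diffs above; the node type IDs and interval values are illustrative and not part of the patch:

    from databricks.bundles.jobs import NodeTypeFlexibility
    from databricks.bundles.pipelines import AutoFullRefreshPolicy, OperationTimeWindow

    # Fallback node types to try during cluster launch and upscale
    # (hypothetical node type IDs).
    flexibility = NodeTypeFlexibility(
        alternate_node_type_ids=["m5d.2xlarge", "m5n.2xlarge"],
    )

    # Allow an automatic full refresh at most once every 23 hours.
    policy = AutoFullRefreshPolicy(enabled=True, min_interval_hours=23)

    # Snapshot-query window starting at 02:00; days_of_week defaults to all
    # days, and time_zone_id defaults to UTC when omitted.
    window = OperationTimeWindow(start_hour=2, time_zone_id="UTC")

    # Each model round-trips through its TypedDict form via as_dict()/from_dict().
    print(flexibility.as_dict())
    print(AutoFullRefreshPolicy.from_dict(policy.as_dict()))
    print(window.as_dict())

In bundle configuration these map onto the corresponding fields introduced above: driver_node_type_flexibility / worker_node_type_flexibility on a cluster spec, auto_full_refresh_policy on a table_configuration, and full_refresh_window on an ingestion pipeline definition.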