6 changes: 3 additions & 3 deletions bundle/run/job.go
@@ -294,10 +294,10 @@ func (r *jobRunner) Restart(ctx context.Context, opts *Options) (output.RunOutpu
 		return r.Run(ctx, opts)
 	}
 
-	s := cmdio.Spinner(ctx)
-	s <- "Cancelling all active job runs"
+	sp := cmdio.NewSpinner(ctx)
+	sp.Update("Cancelling all active job runs")
 	err := r.Cancel(ctx)
-	close(s)
+	sp.Close()
 	if err != nil {
 		return nil, err
 	}
6 changes: 3 additions & 3 deletions bundle/run/pipeline.go
@@ -179,10 +179,10 @@ func (r *pipelineRunner) Cancel(ctx context.Context) error {
 }
 
 func (r *pipelineRunner) Restart(ctx context.Context, opts *Options) (output.RunOutput, error) {
-	s := cmdio.Spinner(ctx)
-	s <- "Cancelling the active pipeline update"
+	sp := cmdio.NewSpinner(ctx)
+	sp.Update("Cancelling the active pipeline update")
 	err := r.Cancel(ctx)
-	close(s)
+	sp.Close()
 	if err != nil {
 		return nil, err
 	}
6 changes: 3 additions & 3 deletions cmd/labs/project/entrypoint.go
@@ -122,14 +122,14 @@ func (e *Entrypoint) preparePython(ctx context.Context, environment map[string]s
 }
 
 func (e *Entrypoint) ensureRunningCluster(ctx context.Context, cfg *config.Config) error {
-	feedback := cmdio.Spinner(ctx)
-	defer close(feedback)
+	sp := cmdio.NewSpinner(ctx)
+	defer sp.Close()
 	w, err := databricks.NewWorkspaceClient((*databricks.Config)(cfg))
 	if err != nil {
 		return fmt.Errorf("workspace client: %w", err)
 	}
 	// TODO: add in-progress callback to EnsureClusterIsRunning() in SDK
-	feedback <- "Ensuring the cluster is running..."
+	sp.Update("Ensuring the cluster is running...")
 	err = w.Clusters.EnsureClusterIsRunning(ctx, cfg.ClusterID)
 	if err != nil {
 		return fmt.Errorf("ensure running: %w", err)
22 changes: 11 additions & 11 deletions cmd/labs/project/installer.go
@@ -194,17 +194,17 @@ func (i *installer) login(ctx context.Context) (*databricks.WorkspaceClient, err
 }
 
 func (i *installer) downloadLibrary(ctx context.Context) error {
-	feedback := cmdio.Spinner(ctx)
-	defer close(feedback)
-	feedback <- "Cleaning up previous installation if necessary"
+	sp := cmdio.NewSpinner(ctx)
+	defer sp.Close()
+	sp.Update("Cleaning up previous installation if necessary")
 	err := i.cleanupLib(ctx)
 	if err != nil {
 		return fmt.Errorf("cleanup: %w", err)
 	}
 	libTarget := i.LibDir()
 	// we may support wheels, jars, and golang binaries. but those are not zipballs
 	if i.IsZipball() {
-		feedback <- "Downloading and unpacking zipball for " + i.version
+		sp.Update("Downloading and unpacking zipball for " + i.version)
 		return i.downloadAndUnpackZipball(ctx, libTarget)
 	}
 	return errors.New("we only support zipballs for now")
@@ -224,9 +224,9 @@ func (i *installer) setupPythonVirtualEnvironment(ctx context.Context, w *databr
 	if !i.HasPython() {
 		return nil
 	}
-	feedback := cmdio.Spinner(ctx)
-	defer close(feedback)
-	feedback <- "Detecting all installed Python interpreters on the system"
+	sp := cmdio.NewSpinner(ctx)
+	defer sp.Close()
+	sp.Update("Detecting all installed Python interpreters on the system")
 	pythonInterpreters, err := DetectInterpreters(ctx)
 	if err != nil {
 		return fmt.Errorf("detect: %w", err)
@@ -238,13 +238,13 @@ func (i *installer) setupPythonVirtualEnvironment(ctx context.Context, w *databr
 	log.Debugf(ctx, "Detected Python %s at: %s", py.Version, py.Path)
 	venvPath := i.virtualEnvPath(ctx)
 	log.Debugf(ctx, "Creating Python Virtual Environment at: %s", venvPath)
-	feedback <- "Creating Virtual Environment with Python " + py.Version
+	sp.Update("Creating Virtual Environment with Python " + py.Version)
 	_, err = process.Background(ctx, []string{py.Path, "-m", "venv", venvPath})
 	if err != nil {
 		return fmt.Errorf("create venv: %w", err)
 	}
 	if i.Installer != nil && i.Installer.RequireDatabricksConnect {
-		feedback <- "Determining Databricks Connect version"
+		sp.Update("Determining Databricks Connect version")
 		cluster, err := w.Clusters.Get(ctx, compute.GetClusterRequest{
 			ClusterId: w.Config.ClusterID,
 		})
@@ -255,14 +255,14 @@ func (i *installer) setupPythonVirtualEnvironment(ctx context.Context, w *databr
 		if !ok {
 			return fmt.Errorf("unsupported runtime: %s", cluster.SparkVersion)
 		}
-		feedback <- "Installing Databricks Connect v" + runtimeVersion
+		sp.Update("Installing Databricks Connect v" + runtimeVersion)
 		pipSpec := "databricks-connect==" + runtimeVersion
 		err = i.installPythonDependencies(ctx, pipSpec)
 		if err != nil {
 			return fmt.Errorf("dbconnect: %w", err)
 		}
 	}
-	feedback <- "Installing Python library dependencies"
+	sp.Update("Installing Python library dependencies")
 	if i.Installer.Extras != "" {
 		// install main and optional dependencies
 		return i.installPythonDependencies(ctx, fmt.Sprintf(".[%s]", i.Installer.Extras))
6 changes: 3 additions & 3 deletions cmd/psql/psql.go
@@ -53,10 +53,10 @@ You can pass additional arguments to psql after a double-dash (--):
 		}
 
 		if argsBeforeDash != 1 {
-			promptSpinner := cmdio.Spinner(ctx)
-			promptSpinner <- "No DATABASE_INSTANCE_NAME argument specified. Loading names for Database instances drop-down."
+			sp := cmdio.NewSpinner(ctx)
+			sp.Update("No DATABASE_INSTANCE_NAME argument specified. Loading names for Database instances drop-down.")
 			instances, err := w.Database.ListDatabaseInstancesAll(ctx, database.ListDatabaseInstancesRequest{})
-			close(promptSpinner)
+			sp.Close()
 			if err != nil {
 				return fmt.Errorf("failed to load names for Database instances drop-down. Please manually specify required argument: DATABASE_INSTANCE_NAME. Original error: %w", err)
 			}
6 changes: 3 additions & 3 deletions cmd/selftest/tui/spinner.go
@@ -14,7 +14,7 @@ func newSpinner() *cobra.Command {
 		Run: func(cmd *cobra.Command, args []string) {
 			ctx := cmd.Context()
 
-			spinner := cmdio.Spinner(ctx)
+			sp := cmdio.NewSpinner(ctx)
 
 			// Test various status messages
 			messages := []struct {
@@ -29,11 +29,11 @@ func newSpinner() *cobra.Command {
 			}
 
 			for _, msg := range messages {
-				spinner <- msg.text
+				sp.Update(msg.text)
 				time.Sleep(msg.duration)
 			}
 
-			close(spinner)
+			sp.Close()
 
 			cmdio.LogString(ctx, "✓ Spinner test complete")
 		},
6 changes: 3 additions & 3 deletions experimental/ssh/internal/setup/setup.go
@@ -166,14 +166,14 @@ func updateSSHConfigFile(configPath, hostConfig, hostName string) error {
 }
 
 func clusterSelectionPrompt(ctx context.Context, client *databricks.WorkspaceClient) (string, error) {
-	spinnerChan := cmdio.Spinner(ctx)
-	spinnerChan <- "Loading clusters."
+	sp := cmdio.NewSpinner(ctx)
+	sp.Update("Loading clusters.")
 	clusters, err := client.Clusters.ClusterDetailsClusterNameToClusterIdMap(ctx, compute.ListClustersRequest{
 		FilterBy: &compute.ListClustersFilterBy{
 			ClusterSources: []compute.ClusterSource{compute.ClusterSourceApi, compute.ClusterSourceUi},
 		},
 	})
-	close(spinnerChan)
+	sp.Close()
 	if err != nil {
 		return "", fmt.Errorf("failed to load names for Clusters drop-down. Please manually specify cluster argument. Original error: %w", err)
 	}
19 changes: 19 additions & 0 deletions libs/cmdio/io.go
@@ -165,6 +165,25 @@ func Spinner(ctx context.Context) chan string {
 	return c.Spinner(ctx)
 }
 
+// NewSpinner creates a new spinner for displaying progress indicators.
+// The returned spinner should be closed when done to release resources.
+//
+// Example:
+//
+//	sp := cmdio.NewSpinner(ctx)
+//	defer sp.Close()
+//	for i := range 100 {
+//		sp.Update(fmt.Sprintf("processing item %d", i))
+//		time.Sleep(100 * time.Millisecond)
+//	}
+//
+// The spinner automatically degrades in non-interactive terminals (no output).
+// Context cancellation will automatically close the spinner.
+func NewSpinner(ctx context.Context) *spinner {
+	c := fromContext(ctx)
+	return c.NewSpinner(ctx)
+}
+
 type cmdIOType int
 
 var cmdIOKey cmdIOType
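The exported NewSpinner above is what every call site earlier in this diff migrates to. A minimal before/after sketch of that migration, assuming the repository's usual import path; doSomething is a hypothetical placeholder, not part of this PR:

package example

import (
	"context"
	"errors"

	"github.com/databricks/cli/libs/cmdio"
)

// doSomething stands in for the long-running call each site wraps in a spinner.
func doSomething(ctx context.Context) error {
	return errors.New("not implemented")
}

// oldStyle shows the channel-based idiom removed from the call sites above.
func oldStyle(ctx context.Context) error {
	s := cmdio.Spinner(ctx)
	s <- "Cancelling all active job runs"
	err := doSomething(ctx)
	close(s)
	return err
}

// newStyle shows the equivalent struct-based idiom this PR introduces.
func newStyle(ctx context.Context) error {
	sp := cmdio.NewSpinner(ctx)
	defer sp.Close() // Close is safe to call more than once, so defer works here
	sp.Update("Cancelling all active job runs")
	return doSomething(ctx)
}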
113 changes: 82 additions & 31 deletions libs/cmdio/spinner.go
@@ -5,14 +5,14 @@ import (
 	"sync"
 	"time"
 
-	"github.com/charmbracelet/bubbles/spinner"
+	bubblespinner "github.com/charmbracelet/bubbles/spinner"
 	tea "github.com/charmbracelet/bubbletea"
 	"github.com/charmbracelet/lipgloss"
 )
 
 // spinnerModel is the Bubble Tea model for the spinner.
 type spinnerModel struct {
-	spinner  spinner.Model
+	spinner  bubblespinner.Model
 	suffix   string
 	quitting bool
 }
@@ -25,9 +25,9 @@ type (
 
 // newSpinnerModel creates a new spinner model.
 func newSpinnerModel() spinnerModel {
-	s := spinner.New()
+	s := bubblespinner.New()
 	// Braille spinner frames with 200ms timing
-	s.Spinner = spinner.Spinner{
+	s.Spinner = bubblespinner.Spinner{
 		Frames: []string{"⣾", "⣽", "⣻", "⢿", "⡿", "⣟", "⣯", "⣷"},
 		FPS:    time.Second / 5, // 200ms = 5 FPS
 	}
@@ -54,7 +54,7 @@ func (m spinnerModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 		m.quitting = true
 		return m, tea.Quit
 
-	case spinner.TickMsg:
+	case bubblespinner.TickMsg:
 		var cmd tea.Cmd
 		m.spinner, cmd = m.spinner.Update(msg)
 		return m, cmd
@@ -76,21 +76,55 @@ func (m spinnerModel) View() string {
 	return m.spinner.View()
 }
 
-// Spinner returns a channel for updating spinner status messages.
-// Send messages to update the suffix, close the channel to stop.
-// The spinner runs until the channel is closed or context is cancelled.
-func (c *cmdIO) Spinner(ctx context.Context) chan string {
-	updates := make(chan string)
-
+// spinner provides a structured interface for displaying progress indicators.
+// Use NewSpinner to create an instance, Update to send status messages,
+// and Close to stop the spinner and clean up resources.
+//
+// The spinner automatically degrades in non-interactive terminals.
+// Context cancellation will automatically close the spinner.
+type spinner struct {
+	p    *tea.Program // nil in non-interactive mode
+	c    *cmdIO
+	ctx  context.Context
+	once sync.Once
+	done chan struct{} // Closed when tea.Program finishes
+}
+
+// Update sends a status message to the spinner.
+// This operation sends directly to the tea.Program.
+func (sp *spinner) Update(msg string) {
+	if sp.p != nil {
+		sp.p.Send(suffixMsg(msg))
+	}
+}
+
+// Close stops the spinner and releases resources.
+// It waits for the spinner to fully terminate before returning.
+// It is safe to call Close multiple times and from multiple goroutines.
+func (sp *spinner) Close() {
+	sp.once.Do(func() {
+		if sp.p != nil {
+			sp.p.Send(quitMsg{})
+		}
+	})
+	// Always wait for termination, even if we weren't the first caller
+	if sp.p != nil {
+		<-sp.done
+	}
+}
+
+// NewSpinner creates a new spinner for displaying progress.
+// The spinner should be closed when done to clean up resources.
+//
+// Example:
+//
+//	sp := cmdio.NewSpinner(ctx)
+//	defer sp.Close()
+//	sp.Update("processing files")
+func (c *cmdIO) NewSpinner(ctx context.Context) *spinner {
 	// Don't show spinner if not interactive
 	if !c.capabilities.SupportsInteractive() {
-		// Return channel but don't start program - just drain messages
-		go func() {
-			for range updates {
-				// Discard messages
-			}
-		}()
-		return updates
+		return &spinner{p: nil, c: c, ctx: ctx}
 	}
 
 	// Create model and program
@@ -108,17 +108,40 @@ func (c *cmdIO) Spinner(ctx context.Context) chan string {
 	// Acquire program slot (queues if another program is running)
 	c.acquireTeaProgram(p)
 
-	// Track both goroutines to ensure clean shutdown
-	var wg sync.WaitGroup
+	done := make(chan struct{})
+	sp := &spinner{
+		p:    p,
+		c:    c,
+		ctx:  ctx,
+		done: done,
+	}
 
 	// Start program in background
-	wg.Go(func() {
+	go func() {
 		_, _ = p.Run()
-	})
+		c.releaseTeaProgram()
+		close(done)
+	}()
+
+	// Handle context cancellation
+	go func() {
+		<-ctx.Done()
+		sp.Close()
+	}()
+
+	return sp
+}
+
+// Spinner returns a channel for updating spinner status messages.
+// Send messages to update the suffix, close the channel to stop.
+// The spinner runs until the channel is closed or context is cancelled.
+func (c *cmdIO) Spinner(ctx context.Context) chan string {
+	updates := make(chan string)
+	sp := c.NewSpinner(ctx)
 
-	// Bridge goroutine: channel -> tea messages
-	wg.Go(func() {
-		defer p.Send(quitMsg{})
+	// Bridge goroutine: channel -> spinner.Update()
+	go func() {
+		defer sp.Close()
 
 		for {
 			select {
@@ -129,15 +186,9 @@ func (c *cmdIO) Spinner(ctx context.Context) chan string {
 					// Channel closed
 					return
 				}
-				p.Send(suffixMsg(msg))
+				sp.Update(msg)
 			}
 		}
-	})
-
-	// Wait for both goroutines, then release
-	go func() {
-		wg.Wait()
-		c.releaseTeaProgram()
 	}()
 
 	return updates
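Per the doc comments in the hunks above, Close is idempotent, waits for the underlying tea.Program to finish, and context cancellation closes the spinner automatically. A short sketch of how a caller can lean on that contract, assuming the same import path as above; the helper name, timeout, and status text are illustrative only:

package example

import (
	"context"
	"time"

	"github.com/databricks/cli/libs/cmdio"
)

// waitWithSpinner is a hypothetical helper: if the timeout fires first, the
// cancellation goroutine inside NewSpinner closes the spinner, and the
// deferred Close below still returns cleanly once the program has exited.
func waitWithSpinner(ctx context.Context) error {
	ctx, cancel := context.WithTimeout(ctx, 5*time.Minute)
	defer cancel()

	sp := cmdio.NewSpinner(ctx)
	defer sp.Close() // idempotent; also waits for the spinner to terminate

	sp.Update("Waiting for the operation to finish")
	<-ctx.Done() // stand-in for long-running work driven by ctx
	return ctx.Err()
}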