diff --git a/CHANGELOG.md b/CHANGELOG.md
index 58bde92..997f444 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,24 @@
 # Changelog
 
+## [0.5.0] - 2026-02-07
+### Added
+- **Local AI support** — Added `ollama` provider for offline, private commit generation
+  Run `gitc --provider ollama` to use local models, no API key required!
+- Custom Ollama URL support — Connect to any Ollama instance via the `--url` flag
+  e.g., `gitc --provider ollama --url "http://localhost:11434/api/generate"`
+
+### Changed
+- Enhanced provider architecture — Unified interface now supports both cloud and local AI
+- Improved API compatibility layer — Handles OpenAI-style and Ollama-style APIs seamlessly
+- Updated model defaults — Ollama uses `llama3.2` as default local model
+
+### Fixed
+- API key validation now optional for Ollama provider
+- Better error messages for local AI connection issues
+- Configuration validation adjusted for different provider requirements
+
+---
+
 ## [0.4.0] - 2025-12-08
 ### Added
 - **`gitc file1 file2 ...`** — Auto-stage files and generate commit message instantly
@@ -53,4 +72,4 @@
 ---
 
 ## [0.1.1] - 2025-04-01
-- Initial release with OpenAI support.
+- Initial release with OpenAI support.
\ No newline at end of file
diff --git a/README.md b/README.md
index 97a406a..1c6fc58 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,3 @@
-<div align="center">
-  <img ... alt="gitc AI-Powered Commits">
-</div>
-
 # ✨ gitc - AI-Powered Git Commit Messages
 
 [![Go Reference](https://pkg.go.dev/badge/github.com/dll-as/gitc)](https://pkg.go.dev/github.com/dll-as/gitc)
@@ -28,9 +24,13 @@
 `gitc` is a lightweight CLI tool that leverages AI to craft clear, standards-compliant Git commit messages from your diffs. Supporting [Conventional Commits](https://www.conventionalcommits.org), [Gitmoji](https://gitmoji.dev), and custom rules, it saves time and boosts commit quality for you and your team.
 
 - 🧠 **AI-Powered Commits**
-  - Generates context-aware commit messages using OpenAI, Grok (xAI), or DeepSeek.
+  - Generates context-aware commit messages using OpenAI, Grok (xAI), DeepSeek, or Ollama (local).
   - Supports multiple languages (e.g., English, Persian, Russian) for global teams.
-  - Extensible for future AI providers like Gemini.
+  - Extensible for future AI providers.
+
+- 🏠 **Local AI Support**
+  - Run Ollama locally for private, offline commit generation - no API key required!
+  - Connect to any Ollama instance via custom URLs.
 
 - 📝 **Standards & Customization**
   - Follows [Conventional Commits](https://www.conventionalcommits.org) (`feat`, `fix`, `docs`, etc.) for semantic versioning.
@@ -44,10 +44,12 @@
 - ⚙️ **Flexible Configuration**
   - Supports CLI flags, environment variables, and `~/.gitc/config.json`.
   - Includes proxy support, adjustable timeouts, and redirect limits.
+  - No API key required for local Ollama provider.
 
 - ⚡️ **Performance & Reliability**
   - Fast JSON parsing with [sonic](https://github.com/bytedance/sonic) and HTTP requests with [fasthttp](https://github.com/valyala/fasthttp).
   - Robust error handling for reliable operation.
+  - Unified interface supporting both cloud and local AI providers.
 
 - 🧪 Debug & Dry-Run
   - Preview prompts and configs without API calls — perfect for tuning without burning tokens.
@@ -56,7 +58,8 @@
 ### Prerequisites:
   - Go: Version **1.18** or higher (required for building from source).
   - Git: Required for retrieving staged changes.
-  - OpenAI API Key: Required for AI-powered commit message generation. Set it via the `AI_API_KEY` environment variable or in the config file.
+  - API Key: Required for cloud AI providers (OpenAI, Grok, DeepSeek). Set it via the `AI_API_KEY` environment variable or in the config file.
+  - Ollama: Optional for local AI (install from [ollama.ai](https://ollama.ai))
 
 #### Quick Install:
 ```bash
@@ -86,6 +89,9 @@
 gitc
 gitc bot.py
 gitc src/utils.go main.go
 
+# Use local Ollama for private commits
+gitc --provider ollama
+
 # Pro Tip: Add emojis and specify language
 gitc --emoji --lang fa
@@ -98,9 +104,10 @@ gitc --dry-run
 
 ## Environment Variables
 ```bash
-export OPENAI_API_KEY="sk-your-key-here"
+export AI_API_KEY="sk-your-key-here"  # For cloud providers
 export GITC_LANGUAGE="fa"
 export GITC_MODEL="gpt-4"
+export GITC_PROVIDER="ollama"         # Use local Ollama by default
 ```
 
 # ⚙️ Configuration
@@ -114,19 +121,15 @@ Config File (`~/.gitc/config.json`) :
   "language": "en",
   "timeout": 10,
   "commit_type": "",
-  "custom-convention": "",
+  "custom_convention": "",
   "use_gitmoji": false,
-  "max_redirects": 5,
-  "open_ai": {
-    "api_key": "sk-your-key-here",
-    "model": "gpt-4o-mini",
-    "url": "https://api.openai.com/v1/chat/completions"
-  }
+  "max_redirects": 5
 }
 ```
 
 ### Update Configuration
 ```bash
+gitc config --provider ollama --model llama3.2
 gitc config --api-key "sk-your-key-here" --model "gpt-4o-mini" --lang en
 ```
 
@@ -137,14 +140,14 @@ The following CLI flags are available for the `ai-commit` command and its `confi
 | Flag | Alias | Description | Default | Environment Variable | Example |
 |------|-------|-------------|---------|----------------------|---------|
 | `--all` | `-a` | Stage all changes before generating commit message (equivalent to `git add .`) | `false` | `GITC_STAGE_ALL` | `-all` or `-a`
-| `--provider` | - | AI provider to use (e.g., `openai`, `anthropic`) | `openai` | `AI_PROVIDER` | `--provider openai` |
-| `--url` | `-u` | Custom API URL for the AI provider | Provider-specific | `GITC_API_URL` | `--url https://api.x.ai/v1/chat/completions`
-| `--model` | - | OpenAI model for commit message generation | `gpt-4o-mini` | - | `--model gpt-4o` |
+| `--provider` | - | AI provider to use (e.g., `openai`, `grok`, `deepseek`, `ollama`) | `openai` | `AI_PROVIDER` | `--provider ollama` |
+| `--url` | `-u` | Custom API URL for the AI provider | Provider-specific | `GITC_API_URL` | `--url http://localhost:11434/api/generate`
+| `--model` | - | AI model for commit message generation | Provider-specific | `GITC_MODEL` | `--model llama3.2` |
 | `--lang` | - | Language for commit messages (e.g., `en`, `fa`, `ru`) | `en` | `GITC_LANGUAGE` | `--lang fa` |
 | `--timeout` | - | Request timeout in seconds | `10` | - | `--timeout 15` |
 | `--max-length` | - | Maximum length of the commit message | `200` | - | `--max-length 150` |
 | `--temperature` | - | Control AI creativity (0.0 = fully deterministic, 1.0 = very creative) | `0.7` | - | `--temperature 0.8`
-| `--api-key` | `-k` | API key for the AI provider | - | `AI_API_KEY` | `--api-key sk-xxx` |
+| `--api-key` | `-k` | API key for the AI provider (not required for Ollama) | - | `AI_API_KEY` | `--api-key sk-xxx` |
 | `--proxy` | `-p` | Proxy URL for API requests | - | `GITC_PROXY` | `--proxy http://proxy.example.com:8080` |
 | `--commit-type` | `-t` | Commit type for Conventional Commits (e.g., `feat`, `fix`) | - | `GITC_COMMIT_TYPE` | `--commit-type feat` |
 | `--scope` | `-s` | Add scope to the commit type (e.g. `auth`, `ui`, `db`) — works with or without `--commit-type` | - | - | `--scope auth` or `-s ui` |
@@ -156,30 +159,37 @@ The following CLI flags are available for the `ai-commit` command and its `confi
 | `--config` | `-c` | Path to the configuration file | `~/.gitc/config.json` | `GITC_CONFIG_PATH` | `--config ./my-config.json` |
 
 > [!NOTE]
+> - **Ollama Note**: No API key required when using `--provider ollama`
+> - **Default URLs**:
+>   - OpenAI: `https://api.openai.com/v1/chat/completions`
+>   - Grok: `https://api.x.ai/v1/chat/completions`
+>   - DeepSeek: `https://api.deepseek.com/v1/chat/completions`
+>   - Ollama: `http://localhost:11434/api/generate`
+> - **Default Models**:
+>   - OpenAI: `gpt-4o-mini`
+>   - Ollama: `llama3.2`
 > - Flags for the `config` subcommand are similar but exclude defaults, as they override the config file.
 > - **Flags** > **Environment Variables** > **Config File** — This is the order of precedence when multiple settings are provided.
 > - The `--custom-convention` flag expects a JSON string with a `prefix` field (e.g., `{"prefix": "JIRA-123"}`).
-> - The `--version` flag displays the current tool version (e.g., `0.3.0`) and can be used to verify installation.
+> - The `--version` flag displays the current tool version (e.g., `0.5.0`) and can be used to verify installation.
 > - The `--all` flag (alias `-a`) stages all changes in the working directory before generating the commit message, streamlining the workflow. For example, `gitc -a --emoji` stages all changes and generates a commit message with Gitmoji.
 > - Environment variables take precedence over config file settings but are overridden by CLI flags.
-> - You can reset all configuration values to their defaults by using gitc config `gitc reset-config`.
-
+> - You can reset all configuration values to their defaults by using `gitc reset-config`.
 
 ## 🤖 AI Providers
 
-`gitc` is designed to be AI-provider agnostic. While it currently supports OpenAI out of the box, support for additional providers is on the roadmap to ensure flexibility and future-proofing.
+`gitc` supports both cloud-based and local AI providers, giving you flexibility and privacy options.
 
-| Provider | Supported Models | Required Configuration | Status |
-| --- | --- | --- | --- |
-| **OpenAI** | `gpt-4o`, `gpt-4o-mini`, `gpt-3.5-turbo` | `api_key`, `model`, `url` (optional) | ✅ Supported (default) |
-| **Grok (xAI)** | grok-3 (experimental) | `api_key`, `model`, `url` | 🧪 Experimental Support |
-| **DeepSeek** | deepseek-rag (experimental) | `api_key`, `model`, `url` | 🧪 Experimental Support |
-| **Gemini (Google)** | Coming Soon | - | 🔜 Planned |
-| **Others** | - | - | 🧪 Under consideration |
-> ℹ️ We're actively working on supporting multiple AI backends to give you more control, flexibility, and performance. Have a provider you'd like to see? [Open a discussion](https://github.com/dll-as/gitc/discussions)!
+| Provider | Supported Models | Required Configuration | API Key | Status |
+| --- | --- | --- | --- | --- |
+| **OpenAI** | `gpt-4o`, `gpt-4o-mini`, `gpt-3.5-turbo` | `model`, `url` (optional) | ✅ Required | ✅ Stable |
+| **Grok (xAI)** | `grok-3`, `grok-2` | `model`, `url` | ✅ Required | 🧪 Experimental |
+| **DeepSeek** | `deepseek-rag`, `deepseek-coder` | `model`, `url` | ✅ Required | 🧪 Experimental |
+| **Ollama (Local)** | Any Ollama model (`llama3.2`, `codellama`, `mistral`, etc.) | `model`, `url` (optional) | ❌ Not Required | ✅ Stable |
+> **Local AI Benefits**: Ollama provides complete privacy, works offline, and has no API costs. Perfect for sensitive projects or when internet access is limited.
 
 ## 🤝 Contributing
 We welcome contributions! Please check out the [contributing guide](CONTRIBUTING.md) before making a PR.
 
 ## ⭐️ Star History
-[![Star History Chart](https://api.star-history.com/svg?repos=dll-as/gitc&type=Date)](https://www.star-history.com/#dll-as/gitc&Date)
+[![Star History Chart](https://api.star-history.com/svg?repos=dll-as/gitc&type=Date)](https://www.star-history.com/#dll-as/gitc&Date)
\ No newline at end of file
diff --git a/cmd/actions.go b/cmd/actions.go
index d07688e..56b34d6 100644
--- a/cmd/actions.go
+++ b/cmd/actions.go
@@ -170,6 +170,8 @@ func (a *App) applyConfigDefaults(cfg *ai.Config) {
 			cfg.Message.Model = "grok-3"
 		case "deepseek":
 			cfg.Message.Model = "deepseek-rag"
+		case "ollama":
+			cfg.Message.Model = "llama3.2"
 		default:
 			cfg.Message.Model = a.config.Model
 		}
@@ -200,6 +202,8 @@ func (a *App) applyConfigDefaults(cfg *ai.Config) {
 			cfg.URL = "https://api.x.ai/v1/chat/completions"
 		case "deepseek":
 			cfg.URL = "https://api.deepseek.com/v1/chat/completions"
+		case "ollama":
+			cfg.URL = "http://localhost:11434/api/generate"
 		default:
 			cfg.URL = a.config.URL
 		}
diff --git a/cmd/commands.go b/cmd/commands.go
index 62fb8d5..e4532f4 100644
--- a/cmd/commands.go
+++ b/cmd/commands.go
@@ -9,7 +9,7 @@ import (
 )
 
 // Version defines the current version of the gitc tool.
-const Version = "0.4.0"
+const Version = "0.5.0"
 
 var appInstance *App
 
@@ -27,27 +27,22 @@ var Commands = &cli.App{
 		},
 		&cli.StringFlag{
 			Name:  "provider",
-			Value: "openai",
-			Usage: "AI provider to use (openai, anthropic)",
+			Usage: "AI provider to use (openai, grok, deepseek, ollama)",
 		},
 		&cli.StringFlag{
 			Name:  "model",
-			Value: "gpt-4o-mini",
 			Usage: "Specify the OpenAI model",
 		},
 		&cli.StringFlag{
 			Name:  "lang",
-			Value: "en",
 			Usage: "Set commit message language (en, fa, ru, etc.)",
 		},
 		&cli.IntFlag{
 			Name:  "timeout",
-			Value: 10,
 			Usage: "Set request timeout in seconds",
 		},
 		&cli.IntFlag{
 			Name:  "max-length",
-			Value: 200,
 			Usage: "Set maximum output length of AI response",
 		},
 		&cli.StringFlag{
@@ -92,13 +87,11 @@ var Commands = &cli.App{
 		&cli.IntFlag{
 			Name:    "max-redirects",
 			Aliases: []string{"r"},
-			Value:   5,
 			Usage:   "Maximum number of HTTP redirects to follow",
 			EnvVars: []string{"GITC_MAX_REDIRECTS"},
 		},
 		&cli.Float64Flag{
 			Name:  "temperature",
-			Value: 0.7,
 			Usage: "Control creativity (0.0 = deterministic, 1.0 = very creative)",
 		},
 		&cli.BoolFlag{
diff --git a/internal/ai/ai.go b/internal/ai/ai.go
index a2dd1f5..725129b 100644
--- a/internal/ai/ai.go
+++ b/internal/ai/ai.go
@@ -41,7 +41,7 @@ func (c *Config) Validate() error {
 	if c.Provider == "" {
 		return fmt.Errorf("provider is required")
 	}
-	if c.APIKey == "" {
+	if c.Provider != "ollama" && c.APIKey == "" {
 		return fmt.Errorf("API key is required")
 	}
 	if c.Timeout <= 0 {
diff --git a/internal/ai/generic/generic.go b/internal/ai/generic/generic.go
index 40dc1cf..60af091 100644
--- a/internal/ai/generic/generic.go
+++ b/internal/ai/generic/generic.go
@@ -18,6 +18,7 @@ const (
 	defaultOpenAIURL   = "https://api.openai.com/v1/chat/completions"
 	defaultGrokURL     = "https://api.x.ai/v1/chat/completions"
 	defaultDeepSeekURL = "https://api.deepseek.com/v1/chat/completions"
+	defaultOllamaURL   = "http://localhost:11434/api/generate"
 	systemPrompt       = "You are an AI assistant that generates concise and meaningful Git commit messages."
 )
@@ -31,9 +32,10 @@ type GenericProvider struct {
 
 // NewGenericProvider creates a new provider for OpenAI-compatible APIs
 func NewGenericProvider(apiKey, proxy, url, provider string) (*GenericProvider, error) {
-	if apiKey == "" {
+	if apiKey == "" && provider != "ollama" {
 		return nil, errors.New("API key is required")
 	}
+
 	if url == "" {
 		switch provider {
 		case "openai":
@@ -42,6 +44,8 @@ func NewGenericProvider(apiKey, proxy, url, provider string) (*GenericProvider,
 			url = defaultGrokURL
 		case "deepseek":
 			url = defaultDeepSeekURL
+		case "ollama":
+			url = defaultOllamaURL
 		default:
 			return nil, fmt.Errorf("no default URL for provider: %s", provider)
 		}
@@ -63,25 +67,43 @@ func NewGenericProvider(apiKey, proxy, url, provider string) (*GenericProvider,
 	}, nil
 }
 
-type Request struct {
-	Model       string    `json:"model"`
-	Messages    []Message `json:"messages"`
-	MaxTokens   int       `json:"max_tokens,omitempty"`
-	Temperature float64   `json:"temperature,omitempty"`
+// Request structures for different providers
+type OpenAIRequest struct {
+	Model       string          `json:"model"`
+	Messages    []OpenAIMessage `json:"messages"`
+	MaxTokens   int             `json:"max_tokens,omitempty"`
+	Temperature float64         `json:"temperature,omitempty"`
+	Stream      bool            `json:"stream,omitempty"`
+}
+
+type OllamaRequest struct {
+	Model  string `json:"model"`
+	Prompt string `json:"prompt"`
+	Stream bool   `json:"stream"`
+	// Options struct {
+	// 	Temperature float64 `json:"temperature,omitempty"`
+	// 	NumPredict  int     `json:"num_predict,omitempty"`
+	// } `json:"options,omitempty"`
+}
+
+type OpenAIMessage struct {
+	Role    string `json:"role"`
+	Content string `json:"content"`
 }
 
-type Response struct {
+type OpenAIResponse struct {
 	Choices []struct {
-		Message Message `json:"message"`
+		Message OpenAIMessage `json:"message"`
 	} `json:"choices"`
 	Error struct {
 		Message string `json:"message"`
-	} `json:"error,omitempty"`
+	} `json:"error"`
 }
 
-type Message struct {
-	Role    string `json:"role"`
-	Content string `json:"content"`
+type OllamaResponse struct {
+	Model    string `json:"model"`
+	Response string `json:"response"`
+	Error    string `json:"error,omitempty"`
 }
 
 // GenerateCommitMessage generates a commit message using the API
@@ -89,18 +111,29 @@ func (p *GenericProvider) GenerateCommitMessage(ctx context.Context, diff string
 	// Adjust prompt based on provider if needed
 	prompt := utils.GetPromptForSingleCommit(diff, opts)
 
-	reqBody := Request{
-		Model: opts.Model,
-		// Store: false,
-		Messages: []Message{
-			{"system", systemPrompt},
-			{"user", prompt},
-		},
-		MaxTokens:   max(512, opts.MaxLength), // More tokens for complete messages
-		Temperature: opts.Temperature,         // Slightly creative but controlled
-	}
+	var reqBody []byte
+	var err error
 
-	jsonData, err := sonic.Marshal(reqBody)
+	if p.provider == "ollama" {
+		ollamaReq := OllamaRequest{
+			Model:  opts.Model,
+			Prompt: prompt,
+			Stream: false,
+		}
+		reqBody, err = sonic.Marshal(ollamaReq)
+	} else {
+		openaiReq := OpenAIRequest{
+			Model: opts.Model,
+			Messages: []OpenAIMessage{
+				{"system", systemPrompt},
+				{"user", prompt},
+			},
+			MaxTokens:   max(512, opts.MaxLength), // More tokens for complete messages
+			Temperature: opts.Temperature,         // Slightly creative but controlled
+			Stream:      false,
+		}
+		reqBody, err = sonic.Marshal(openaiReq)
+	}
 	if err != nil {
 		return "", fmt.Errorf("failed to encode JSON: %v", err)
 	}
@@ -110,9 +143,14 @@
 
 	req.SetRequestURI(p.url)
 	req.Header.SetMethod("POST")
-	req.Header.Set("Authorization", "Bearer "+p.apiKey)
+
+	// Only set Authorization header for providers that need it
+	if p.provider != "ollama" && p.apiKey != "" {
+		req.Header.Set("Authorization", "Bearer "+p.apiKey)
+	}
+
 	req.Header.Set("Content-Type", "application/json")
-	req.SetBody(jsonData)
+	req.SetBody(reqBody)
 
 	resp := fasthttp.AcquireResponse()
 	defer fasthttp.ReleaseResponse(resp)
@@ -121,26 +159,32 @@ func (p *GenericProvider) GenerateCommitMessage(ctx context.Context, diff string
 		return "", fmt.Errorf("API request failed: %w", err)
 	}
 
-	var res Response
-	if err = sonic.Unmarshal(resp.Body(), &res); err != nil {
-		return "", fmt.Errorf("failed to parse response: %v", err)
-	}
+	var commitMessage string
+	if p.provider == "ollama" {
+		var ollamaRes OllamaResponse
+		if err = sonic.Unmarshal(resp.Body(), &ollamaRes); err != nil {
+			return "", fmt.Errorf("failed to parse Ollama response: %v", err)
+		}
 
-	if statusCode := resp.StatusCode(); statusCode != fasthttp.StatusOK {
-		if res.Error.Message != "" {
-			return "", fmt.Errorf("API error [%d] from %s: %s", statusCode, p.provider, res.Error.Message)
+		if ollamaRes.Error != "" {
+			return "", fmt.Errorf("Ollama API error: %s", ollamaRes.Error)
 		}
-		return "", fmt.Errorf("API returned status %d from %s: %s", statusCode, p.provider, resp.Body())
-	}
+		commitMessage = strings.TrimSpace(ollamaRes.Response)
+	} else {
+		var openaiRes OpenAIResponse
+		if err = sonic.Unmarshal(resp.Body(), &openaiRes); err != nil {
+			return "", fmt.Errorf("failed to parse response: %v", err)
+		}
 
-	if res.Error.Message != "" {
-		return "", fmt.Errorf("API error from %s: %s", p.provider, res.Error.Message)
-	} else if len(res.Choices) == 0 {
-		return "", fmt.Errorf("no response from %s", p.provider)
-	}
+		if openaiRes.Error.Message != "" {
+			return "", fmt.Errorf("API error from %s: %s", p.provider, openaiRes.Error.Message)
+		} else if len(openaiRes.Choices) == 0 {
+			return "", fmt.Errorf("no response from %s", p.provider)
+		}
 
-	commitMessage := strings.TrimSpace(res.Choices[0].Message.Content)
+		commitMessage = strings.TrimSpace(openaiRes.Choices[0].Message.Content)
+	}
 
 	if commitMessage == "" {
 		return "", fmt.Errorf("empty commit message generated by %s", p.provider)
 	}
diff --git a/pkg/config/config.go b/pkg/config/config.go
index c0be58c..af2b42e 100644
--- a/pkg/config/config.go
+++ b/pkg/config/config.go
@@ -71,7 +71,7 @@ func Load() (*Config, error) {
 	}
 
 	var cfg Config
-	if err := sonic.Unmarshal(data, &cfg); err != nil {
+	if err = sonic.Unmarshal(data, &cfg); err != nil {
 		return nil, fmt.Errorf("failed to unmarshal config: %w", err)
 	}
 
@@ -80,9 +80,12 @@ func Load() (*Config, error) {
 	if cfg.Provider == "" {
 		cfg.Provider = defaults.Provider
 	}
-	if cfg.APIKey == "" {
+
+	// Ollama doesn't require an API key, so don't fill in the default for it
+	if cfg.Provider != "ollama" && cfg.APIKey == "" {
 		cfg.APIKey = defaults.APIKey
 	}
+
 	if cfg.Model == "" {
 		switch cfg.Provider {
 		case "openai":
@@ -91,6 +94,8 @@
 			cfg.Model = "grok-3"
 		case "deepseek":
 			cfg.Model = "deepseek-rag"
+		case "ollama":
+			cfg.Model = "llama3.2"
 		default:
 			cfg.Model = defaults.Model
 		}
@@ -103,6 +108,8 @@
 			cfg.URL = "https://api.x.ai/v1/chat/completions"
 		case "deepseek":
 			cfg.URL = "https://api.deepseek.com/v1/chat/completions"
+		case "ollama":
+			cfg.URL = "http://localhost:11434/api/generate"
 		default:
 			cfg.URL = defaults.URL
 		}
@@ -149,8 +156,8 @@ func (c *Config) Validate() error {
 	if c.Provider == "" {
 		return fmt.Errorf("provider is required")
 	}
-	if c.APIKey == "" {
-		return fmt.Errorf("API key is required")
required") + if c.Provider != "ollama" && c.APIKey == "" { + return fmt.Errorf("API key is required for %s provider", c.Provider) } if c.Timeout <= 0 { return fmt.Errorf("timeout must be positive")