diff --git a/src/extension/ai/helpers.ts b/src/extension/ai/helpers.ts index a20709d9..52ac91a2 100644 --- a/src/extension/ai/helpers.ts +++ b/src/extension/ai/helpers.ts @@ -3,6 +3,7 @@ import type { BaseChatModel } from '@langchain/core/language_models/chat_models' import { AzureOpenAIModelProvider } from './model-providers/azure-openai' import type { BaseModelProvider } from './model-providers/base' import { AnthropicModelProvider } from './model-providers/claude' +import { MiniMaxModelProvider } from './model-providers/minimax' import { OpenAIModelProvider } from './model-providers/openai' import { parseModelBaseUrl, type ModelUrlType } from './parse-model-base-url' @@ -12,7 +13,8 @@ export const getCurrentModelProvider = async () => { const urlTypeProviderMap = { openai: OpenAIModelProvider, 'azure-openai': AzureOpenAIModelProvider, - anthropic: AnthropicModelProvider + anthropic: AnthropicModelProvider, + minimax: MiniMaxModelProvider } satisfies Record<ModelUrlType, new () => BaseModelProvider<BaseChatModel>> return urlTypeProviderMap[urlType] || OpenAIModelProvider diff --git a/src/extension/ai/model-providers/minimax.ts b/src/extension/ai/model-providers/minimax.ts new file mode 100644 index 00000000..120d8fb1 --- /dev/null +++ b/src/extension/ai/model-providers/minimax.ts @@ -0,0 +1,41 @@ +import { getConfigKey } from '@extension/config' +import { getContext } from '@extension/context' +import { ChatOpenAI, type ChatOpenAICallOptions } from '@langchain/openai' +import * as vscode from 'vscode' + +import { parseModelBaseUrl } from '../parse-model-base-url' +import { BaseModelProvider } from './base' + +export class MiniMaxModelProvider extends BaseModelProvider< + ChatOpenAI<ChatOpenAICallOptions> +> { + async createModel() { + const isDev = getContext().extensionMode !== vscode.ExtensionMode.Production + const { url: openaiBaseUrl } = await parseModelBaseUrl() + const openaiKey = await getConfigKey('openaiKey') + const openaiModel = await getConfigKey('openaiModel') + + // MiniMax requires temperature in (0.0, 1.0] + const 
temperature = Math.min(Math.max(0.01, 0.95), 1.0) + + const model = new ChatOpenAI({ + apiKey: openaiKey, + configuration: { + baseURL: openaiBaseUrl || 'https://api.minimax.io/v1', + fetch + }, + model: openaiModel, + temperature, + maxRetries: 3, + verbose: isDev + }) + + // MiniMax API does not support these OpenAI-specific parameters + model.frequencyPenalty = undefined as any + model.n = undefined as any + model.presencePenalty = undefined as any + model.topP = undefined as any + + return model + } +} diff --git a/src/extension/ai/parse-model-base-url.ts b/src/extension/ai/parse-model-base-url.ts index e8b3ad4e..a1811c16 100644 --- a/src/extension/ai/parse-model-base-url.ts +++ b/src/extension/ai/parse-model-base-url.ts @@ -2,7 +2,7 @@ import { getConfigKey } from '@extension/config' import { t } from '@extension/i18n' -export type ModelUrlType = 'openai' | 'azure-openai' | 'anthropic' +export type ModelUrlType = 'openai' | 'azure-openai' | 'anthropic' | 'minimax' export const parseModelBaseUrl = async (): Promise<{ urlType: ModelUrlType url: string @@ -20,7 +20,7 @@ export const parseModelBaseUrl = async (): Promise<{ // Use regexp to parse the urlType const regex = - /^(openai|azure-openai|anthropic|copilot)?@?(https?:\/\/[^\s]+)?$/ + /^(openai|azure-openai|anthropic|minimax|copilot)?@?(https?:\/\/[^\s]+)?$/ const match = baseUrl.trim().match(regex) if (match) { diff --git a/src/extension/polyfill.ts b/src/extension/polyfill.ts index 832b1c98..e5caf075 100644 --- a/src/extension/polyfill.ts +++ b/src/extension/polyfill.ts @@ -1,9 +1,8 @@ const enableFetchPolyfill = async () => { if (!globalThis.fetch) { // if globalThis.fetch is not available, we use undici - const { fetch, FormData, Headers, Request, Response, File } = await import( - 'undici' - ) + const { fetch, FormData, Headers, Request, Response, File } = + await import('undici') Object.assign(globalThis, { fetch, diff --git a/src/extension/storage.ts b/src/extension/storage.ts index f02e2a66..6a19a797 
100644 --- a/src/extension/storage.ts +++ b/src/extension/storage.ts @@ -9,9 +9,9 @@ export interface Storage { readonly length: number } -export class StateStorage<T extends Record<string, any> = Record<string, any>> - implements Storage -{ +export class StateStorage< + T extends Record<string, any> = Record<string, any> +> implements Storage { state: T createInitState: () => T diff --git a/test/minimax.test.ts b/test/minimax.test.ts new file mode 100644 index 00000000..e8b7c774 --- /dev/null +++ b/test/minimax.test.ts @@ -0,0 +1,268 @@ +import { describe, expect, it } from 'vitest' + +// Test 1: parseModelBaseUrl regex supports minimax prefix +describe('parseModelBaseUrl - minimax support', () => { + // The regex from parse-model-base-url.ts + const regex = + /^(openai|azure-openai|anthropic|minimax|copilot)?@?(https?:\/\/[^\s]+)?$/ + + it('should parse minimax@ prefix URL', () => { + const match = 'minimax@https://api.minimax.io/v1'.match(regex) + expect(match).not.toBeNull() + expect(match![1]).toBe('minimax') + expect(match![2]).toBe('https://api.minimax.io/v1') + }) + + it('should parse minimax@ with custom URL', () => { + const match = 'minimax@https://custom.proxy.com/v1'.match(regex) + expect(match).not.toBeNull() + expect(match![1]).toBe('minimax') + expect(match![2]).toBe('https://custom.proxy.com/v1') + }) + + it('should still parse openai@ prefix', () => { + const match = 'openai@https://api.openai.com/v1'.match(regex) + expect(match).not.toBeNull() + expect(match![1]).toBe('openai') + }) + + it('should still parse anthropic@ prefix', () => { + const match = 'anthropic@https://api.anthropic.com'.match(regex) + expect(match).not.toBeNull() + expect(match![1]).toBe('anthropic') + }) + + it('should still parse azure-openai@ prefix', () => { + const match = + 'azure-openai@https://westeurope.api.microsoft.com/openai/deployments/gpt-4o'.match( + regex + ) + expect(match).not.toBeNull() + expect(match![1]).toBe('azure-openai') + }) + + it('should parse URL without prefix as openai (default)', () => { + const match = 
'https://api.minimax.io/v1'.match(regex) + expect(match).not.toBeNull() + expect(match![1]).toBeUndefined() + expect(match![2]).toBe('https://api.minimax.io/v1') + }) + + it('should not match invalid prefix', () => { + const match = 'invalid@https://api.minimax.io/v1'.match(regex) + expect(match).toBeNull() + }) + + it('should not match empty string for prefix only', () => { + const match = '@https://api.minimax.io/v1'.match(regex) + expect(match).not.toBeNull() + expect(match![1]).toBeUndefined() + }) +}) + +// Test 2: MiniMax provider URL type mapping +describe('urlTypeProviderMap - minimax entry', () => { + it('should include minimax in ModelUrlType union', () => { + // Verify all expected URL types are accounted for + const validTypes = ['openai', 'azure-openai', 'anthropic', 'minimax'] + validTypes.forEach(type => { + expect(typeof type).toBe('string') + }) + }) + + it('should map minimax to a provider class', () => { + // Simulate the provider map logic from helpers.ts + const urlTypeProviderMap: Record<string, string> = { + openai: 'OpenAIModelProvider', + 'azure-openai': 'AzureOpenAIModelProvider', + anthropic: 'AnthropicModelProvider', + minimax: 'MiniMaxModelProvider' + } + + expect(urlTypeProviderMap.minimax).toBe('MiniMaxModelProvider') + expect(Object.keys(urlTypeProviderMap)).toContain('minimax') + }) + + it('should fallback to openai for unknown urlType', () => { + const urlTypeProviderMap: Record<string, string> = { + openai: 'OpenAIModelProvider', + 'azure-openai': 'AzureOpenAIModelProvider', + anthropic: 'AnthropicModelProvider', + minimax: 'MiniMaxModelProvider' + } + + const unknownType = 'unknown' + const provider = urlTypeProviderMap[unknownType] || 'OpenAIModelProvider' + expect(provider).toBe('OpenAIModelProvider') + }) +}) + +// Test 3: MiniMax temperature clamping logic +describe('MiniMax temperature clamping', () => { + // The clamping logic from minimax.ts: Math.min(Math.max(0.01, value), 1.0) + const clampTemperature = (value: number) => + Math.min(Math.max(0.01, 
value), 1.0) + + it('should clamp 0.95 to 0.95 (within range)', () => { + expect(clampTemperature(0.95)).toBe(0.95) + }) + + it('should clamp 0 to 0.01 (minimum bound)', () => { + expect(clampTemperature(0)).toBe(0.01) + }) + + it('should clamp negative values to 0.01', () => { + expect(clampTemperature(-0.5)).toBe(0.01) + }) + + it('should clamp 1.5 to 1.0 (maximum bound)', () => { + expect(clampTemperature(1.5)).toBe(1.0) + }) + + it('should keep 1.0 as 1.0', () => { + expect(clampTemperature(1.0)).toBe(1.0) + }) + + it('should keep 0.5 as 0.5', () => { + expect(clampTemperature(0.5)).toBe(0.5) + }) + + it('should ensure result is always > 0', () => { + const result = clampTemperature(0) + expect(result).toBeGreaterThan(0) + }) + + it('should ensure result is always <= 1', () => { + const result = clampTemperature(2.0) + expect(result).toBeLessThanOrEqual(1.0) + }) +}) + +// Test 4: MiniMax model names validation +describe('MiniMax model names', () => { + const validModels = [ + 'MiniMax-M2.7', + 'MiniMax-M2.7-highspeed', + 'MiniMax-M2.5', + 'MiniMax-M2.5-highspeed' + ] + + it('should have valid MiniMax model format', () => { + for (const model of validModels) { + expect(model).toMatch(/^MiniMax-M2\.[57](-highspeed)?$/) + } + }) + + it('should have exactly 4 model variants', () => { + expect(validModels).toHaveLength(4) + }) + + it('should include flagship M2.7 model', () => { + expect(validModels).toContain('MiniMax-M2.7') + }) + + it('should include highspeed M2.7 model', () => { + expect(validModels).toContain('MiniMax-M2.7-highspeed') + }) +}) + +// Test 5: MiniMax default base URL +describe('MiniMax default base URL', () => { + it('should use https://api.minimax.io/v1 as default', () => { + const defaultBaseUrl = 'https://api.minimax.io/v1' + expect(defaultBaseUrl).toBe('https://api.minimax.io/v1') + expect(defaultBaseUrl).toMatch(/^https:\/\//) + expect(defaultBaseUrl).toContain('minimax') + }) + + it('should fallback to default when url is empty', () => { 
+ const url = '' + const effectiveUrl = url || 'https://api.minimax.io/v1' + expect(effectiveUrl).toBe('https://api.minimax.io/v1') + }) + + it('should use provided URL when non-empty', () => { + const url = 'https://custom-proxy.example.com/v1' + const effectiveUrl = url || 'https://api.minimax.io/v1' + expect(effectiveUrl).toBe('https://custom-proxy.example.com/v1') + }) +}) + +// Test 6: MiniMax configuration format +describe('MiniMax VSCode configuration format', () => { + it('should produce valid JSON settings', () => { + const config = { + 'aide.openaiBaseUrl': 'minimax@https://api.minimax.io/v1', + 'aide.openaiKey': 'test-api-key', + 'aide.openaiModel': 'MiniMax-M2.7' + } + + expect(() => JSON.stringify(config)).not.toThrow() + expect(config['aide.openaiBaseUrl']).toContain('minimax@') + expect(config['aide.openaiModel']).toBe('MiniMax-M2.7') + }) +}) + +// Integration test: end-to-end config parsing to provider selection +describe('MiniMax integration: config to provider flow', () => { + it('should extract minimax urlType from config URL', () => { + const configUrl = 'minimax@https://api.minimax.io/v1' + const regex = + /^(openai|azure-openai|anthropic|minimax|copilot)?@?(https?:\/\/[^\s]+)?$/ + const match = configUrl.trim().match(regex) + + expect(match).not.toBeNull() + const urlType = match![1] || 'openai' + const url = match![2] || '' + + expect(urlType).toBe('minimax') + expect(url).toBe('https://api.minimax.io/v1') + + // Then provider map lookup + const providerMap: Record<string, string> = { + openai: 'OpenAIModelProvider', + 'azure-openai': 'AzureOpenAIModelProvider', + anthropic: 'AnthropicModelProvider', + minimax: 'MiniMaxModelProvider' + } + + expect(providerMap[urlType]).toBe('MiniMaxModelProvider') + }) + + it('should handle minimax with proxy URL', () => { + const configUrl = 'minimax@https://my-proxy.company.com/minimax/v1' + const regex = + /^(openai|azure-openai|anthropic|minimax|copilot)?@?(https?:\/\/[^\s]+)?$/ + const match = 
configUrl.trim().match(regex) + + expect(match).not.toBeNull() + expect(match![1]).toBe('minimax') + expect(match![2]).toBe('https://my-proxy.company.com/minimax/v1') + }) + + it('should handle all providers consistently', () => { + const testCases = [ + { + url: 'openai@https://api.openai.com/v1', + expectedType: 'openai' + }, + { + url: 'anthropic@https://api.anthropic.com', + expectedType: 'anthropic' + }, + { + url: 'minimax@https://api.minimax.io/v1', + expectedType: 'minimax' + } + ] + + const regex = + /^(openai|azure-openai|anthropic|minimax|copilot)?@?(https?:\/\/[^\s]+)?$/ + + for (const tc of testCases) { + const match = tc.url.match(regex) + expect(match).not.toBeNull() + expect(match![1]).toBe(tc.expectedType) + } + }) +}) diff --git a/website/.vitepress/config/en.ts b/website/.vitepress/config/en.ts index 2ecec933..ded0ff94 100644 --- a/website/.vitepress/config/en.ts +++ b/website/.vitepress/config/en.ts @@ -184,6 +184,7 @@ function sidebar(): DefaultTheme.Sidebar { { text: 'Google', link: '/google' }, { text: 'IFlytek', link: '/iflytek' }, { text: 'LocalAI', link: '/local-ai' }, + { text: 'MiniMax', link: '/minimax' }, { text: 'Ollama', link: '/ollama' }, { text: 'OpenAI', link: '/openai' }, { text: 'Qwen', link: '/qwen' }, diff --git a/website/.vitepress/config/zh.ts b/website/.vitepress/config/zh.ts index e1049625..6accaeb5 100644 --- a/website/.vitepress/config/zh.ts +++ b/website/.vitepress/config/zh.ts @@ -219,6 +219,7 @@ function sidebar(): DefaultTheme.Sidebar { { text: '谷歌', link: '/google' }, { text: '讯飞', link: '/iflytek' }, { text: 'LocalAI', link: '/local-ai' }, + { text: 'MiniMax', link: '/minimax' }, { text: 'Ollama', link: '/ollama' }, { text: 'OpenAI', link: '/openai' }, { text: '通义千问', link: '/qwen' }, diff --git a/website/en/guide/use-another-llm/minimax.md b/website/en/guide/use-another-llm/minimax.md new file mode 100644 index 00000000..8536278b --- /dev/null +++ b/website/en/guide/use-another-llm/minimax.md @@ -0,0 +1,46 @@ +# 
MiniMax + +This guide introduces how to configure and use the ==MiniMax== model in ==Aide==. + +You can find more detailed information in the [==MiniMax== Official Reference Documentation](https://platform.minimaxi.com/document/introduction). + +### API Base URL Configuration + +You need to configure [`aide.openaiBaseUrl`](../configuration/openai-base-url.md) to `minimax@https://api.minimax.io/v1` + +::: tip Note + +Since ==MiniMax== has specific parameter requirements (such as temperature constraints), you need to add `minimax@` as a prefix in the URL. This ensures the extension handles MiniMax-specific configurations correctly. + +::: + +### API Key Configuration + +You need to configure [`aide.openaiKey`](../configuration/openai-key.md) as your ==MiniMax== API Key. + +You can obtain your API key from the [MiniMax Platform](https://platform.minimaxi.com/). + +### Model Configuration + +You need to configure [`aide.openaiModel`](../configuration/openai-model.md) to the ==MiniMax== model. We recommend using the `MiniMax-M2.7` model. Other available models include: + +- `MiniMax-M2.7` - Latest flagship model with 1M context window +- `MiniMax-M2.7-highspeed` - Faster variant of M2.7 +- `MiniMax-M2.5` - Previous generation model with 204K context +- `MiniMax-M2.5-highspeed` - Faster variant of M2.5 + +For more models, please refer to the official reference documentation above. + +### Example Configuration File + +Below is a complete configuration example: + +```json +{ + "aide.openaiBaseUrl": "minimax@https://api.minimax.io/v1", + "aide.openaiKey": "your-minimax-api-key", + "aide.openaiModel": "MiniMax-M2.7" +} +``` + +Make sure to replace `"your-minimax-api-key"` with your actual API Key. 
diff --git a/website/zh/guide/use-another-llm/minimax.md b/website/zh/guide/use-another-llm/minimax.md new file mode 100644 index 00000000..f663182c --- /dev/null +++ b/website/zh/guide/use-another-llm/minimax.md @@ -0,0 +1,46 @@ +# MiniMax + +本指南介绍如何在 ==Aide== 中配置和使用 ==MiniMax== 模型。 + +您可以在 [==MiniMax== 官方参考文档](https://platform.minimaxi.com/document/introduction) 中找到更多详细信息。 + +### API Base URL 配置 + +您需要将 [`aide.openaiBaseUrl`](../configuration/openai-base-url.md) 配置为 `minimax@https://api.minimax.io/v1` + +::: tip 注意 + +由于 ==MiniMax== 有特定的参数要求(如温度约束),您需要在 URL 中添加 `minimax@` 前缀。这可以确保扩展正确处理 MiniMax 特定的配置。 + +::: + +### API Key 配置 + +您需要将 [`aide.openaiKey`](../configuration/openai-key.md) 配置为您的 ==MiniMax== API Key。 + +您可以从 [MiniMax 平台](https://platform.minimaxi.com/) 获取您的 API Key。 + +### 模型配置 + +您需要将 [`aide.openaiModel`](../configuration/openai-model.md) 配置为 ==MiniMax== 模型。我们推荐使用 `MiniMax-M2.7` 模型。其他可用模型包括: + +- `MiniMax-M2.7` - 最新旗舰模型,支持 100 万上下文窗口 +- `MiniMax-M2.7-highspeed` - M2.7 的高速版本 +- `MiniMax-M2.5` - 上一代模型,支持 204K 上下文 +- `MiniMax-M2.5-highspeed` - M2.5 的高速版本 + +更多模型请参考上方的官方参考文档。 + +### 配置文件示例 + +下面是一个完整的配置示例: + +```json +{ + "aide.openaiBaseUrl": "minimax@https://api.minimax.io/v1", + "aide.openaiKey": "your-minimax-api-key", + "aide.openaiModel": "MiniMax-M2.7" +} +``` + +请确保将 `"your-minimax-api-key"` 替换为您的实际 API Key。