diff --git a/.gitignore b/.gitignore index ed4133d..ab7a975 100644 --- a/.gitignore +++ b/.gitignore @@ -1,43 +1,61 @@ # dependencies (bun install) -node_modules +node_modules/ +.bun/ # output -out -dist +out/ +dist/ +build/ +release/ *.tgz +*.tsbuildinfo + +# vendor binaries (downloaded / generated) +vendor/ +apps/electron/vendor/ # code coverage -coverage +coverage/ *.lcov +.nyc_output/ + +# test outputs +test-results/ +playwright-report/ +reports/ # logs -logs -_.log +logs/ +*.log +*.tmp +*.swp report.[0-9]_.[0-9]_.[0-9]_.[0-9]_.json # dotenv environment variable files .env -.env.development.local -.env.test.local -.env.production.local -.env.local +.env.* # caches .eslintcache -.cache -*.tsbuildinfo +.cache/ # IntelliJ based IDEs -.idea +.idea/ + +# VSCode +.vscode/ # Finder (MacOS) folder config .DS_Store +Thumbs.db -# ignore craft-agents-oss folder -craft-agents-oss +# Codex local workspace state +.codex/ -# ignore what_are_humans_thinking folder -what_are_humans_thinking +# --- AI/Agent local instructions (do not commit) --- +AGENTS.md +CLAUDE.md -# vendor binaries -vendor \ No newline at end of file +# ignore local scratch folders +craft-agents-oss/ +what_are_humans_thinking/ diff --git a/apps/electron/src/main/lib/agent-orchestrator.ts b/apps/electron/src/main/lib/agent-orchestrator.ts index acd0b39..c917172 100644 --- a/apps/electron/src/main/lib/agent-orchestrator.ts +++ b/apps/electron/src/main/lib/agent-orchestrator.ts @@ -28,7 +28,7 @@ import type { ClaudeAgentQueryOptions } from './adapters/claude-agent-adapter' import { isPromptTooLongError } from './adapters/claude-agent-adapter' import { AgentEventBus } from './agent-event-bus' import { decryptApiKey, getChannelById, listChannels } from './channel-manager' -import { getAdapter, fetchTitle, normalizeAnthropicBaseUrlForSdk } from '@proma/core' +import { getAdapter, fetchTitle, normalizeAnthropicBaseUrlForSdk, normalizeOpenAIBaseUrl } from '@proma/core' import { getFetchFn } from './proxy-fetch' 
import { getEffectiveProxyUrl } from './proxy-settings-service' import { appendAgentMessage, updateAgentSessionMeta, getAgentSessionMeta, getAgentSessionMessages } from './agent-session-manager' @@ -564,9 +564,16 @@ export class AgentOrchestrator { const apiKey = decryptApiKey(channelId) const providerAdapter = getAdapter(channel.provider) const request = providerAdapter.buildTitleRequest({ - baseUrl: channel.baseUrl, + baseUrl: + channel.provider === 'openai' + ? normalizeOpenAIBaseUrl(channel.baseUrl) + : channel.baseUrl, apiKey, modelId, + apiFormat: + channel.provider === 'openai' || channel.provider === 'custom' + ? channel.apiFormat + : 'chat_completions', prompt: TITLE_PROMPT + userMessage, }) @@ -1461,4 +1468,4 @@ export class AgentOrchestrator { this.adapter.dispose() this.activeSessions.clear() } -} \ No newline at end of file +} diff --git a/apps/electron/src/main/lib/channel-manager.ts b/apps/electron/src/main/lib/channel-manager.ts index 83a7630..809275e 100644 --- a/apps/electron/src/main/lib/channel-manager.ts +++ b/apps/electron/src/main/lib/channel-manager.ts @@ -23,7 +23,7 @@ import type { } from '@proma/shared' import { getFetchFn } from './proxy-fetch' import { getEffectiveProxyUrl } from './proxy-settings-service' -import { normalizeAnthropicBaseUrl, normalizeBaseUrl } from '@proma/core' +import { normalizeAnthropicBaseUrl, normalizeBaseUrl, normalizeOpenAIBaseUrl, probeOpenAICompatibleModelsBaseUrl } from '@proma/core' /** 当前配置版本 */ const CONFIG_VERSION = 1 @@ -132,11 +132,17 @@ export function createChannel(input: ChannelCreateInput): Channel { const config = readConfig() const now = Date.now() + const effectiveApiFormat = + input.provider === 'openai' || input.provider === 'custom' + ? 
input.apiFormat + : undefined + const channel: Channel = { id: randomUUID(), name: input.name, provider: input.provider, baseUrl: input.baseUrl, + apiFormat: effectiveApiFormat, apiKey: encryptApiKey(input.apiKey), models: input.models, enabled: input.enabled, @@ -168,11 +174,18 @@ export function updateChannel(id: string, input: ChannelUpdateInput): Channel { const existing = config.channels[index]! + const nextProvider = input.provider ?? existing.provider + const effectiveApiFormat = + nextProvider === 'openai' || nextProvider === 'custom' + ? (input.apiFormat ?? existing.apiFormat) + : undefined + const updated: Channel = { ...existing, name: input.name ?? existing.name, - provider: input.provider ?? existing.provider, + provider: nextProvider, baseUrl: input.baseUrl ?? existing.baseUrl, + apiFormat: effectiveApiFormat, apiKey: input.apiKey ? encryptApiKey(input.apiKey) : existing.apiKey, models: input.models ?? existing.models, enabled: input.enabled ?? existing.enabled, @@ -240,14 +253,16 @@ export async function testChannel(channelId: string): Promise case 'anthropic': return await testAnthropic(channel.baseUrl, apiKey, proxyUrl) case 'openai': + return await testOpenAICompatible(channel.baseUrl, apiKey, proxyUrl, true) case 'deepseek': case 'moonshot': case 'zhipu': case 'minimax': case 'doubao': case 'qwen': - case 'custom': return await testOpenAICompatible(channel.baseUrl, apiKey, proxyUrl) + case 'custom': + return await testCustomOpenAICompatible(channel.baseUrl, apiKey, proxyUrl) case 'google': return await testGoogle(channel.baseUrl, apiKey, proxyUrl) default: @@ -297,8 +312,13 @@ async function testAnthropic(baseUrl: string, apiKey: string, proxyUrl?: string) /** * 测试 OpenAI 兼容 API 连接(OpenAI / DeepSeek / Custom) */ -async function testOpenAICompatible(baseUrl: string, apiKey: string, proxyUrl?: string): Promise { - const url = normalizeBaseUrl(baseUrl) +async function testOpenAICompatible( + baseUrl: string, + apiKey: string, + proxyUrl?: string, + 
ensureV1: boolean = false, +): Promise { + const url = ensureV1 ? normalizeOpenAIBaseUrl(baseUrl) : normalizeBaseUrl(baseUrl) const fetchFn = getFetchFn(proxyUrl) const response = await fetchFn(`${url}/models`, { @@ -320,6 +340,60 @@ async function testOpenAICompatible(baseUrl: string, apiKey: string, proxyUrl?: return { success: false, message: `请求失败 (${response.status}): ${text.slice(0, 200)}` } } +/** + * 测试 OpenAI 兼容 API 连接(Custom 专用:自动探测是否需要 /v1) + * + * 同时探测: + * - {baseUrl}/models + * - {baseUrl}/v1/models + * + * 以探测结果决定推荐的 Base URL(优先选择 /v1)。 + */ +async function testCustomOpenAICompatible(baseUrl: string, apiKey: string, proxyUrl?: string): Promise { + const fetchFn = getFetchFn(proxyUrl) + const baseNoV1 = normalizeBaseUrl(baseUrl) + const baseV1 = normalizeOpenAIBaseUrl(baseUrl) + + const { best, probes, resolvedBaseUrl } = await probeOpenAICompatibleModelsBaseUrl({ + baseUrl, + apiKey, + fetchFn, + }) + const suffixHint = resolvedBaseUrl && resolvedBaseUrl === baseV1 && baseV1 !== baseNoV1 + ? '(已自动补全 /v1)' + : '' + + if (best.ok) { + return { success: true, message: `连接成功${suffixHint}`, resolvedBaseUrl } + } + + if (best.status === 401) { + return { success: false, message: `API Key 无效${suffixHint}`, resolvedBaseUrl } + } + + if (best.status === 404 && probes.every((p) => p.status === 404)) { + return { + success: false, + message: '请求失败 (404): 未找到 /models 或 /v1/models,请检查 Base URL 是否正确', + resolvedBaseUrl, + } + } + + if (best.status > 0) { + return { + success: false, + message: `请求失败 (${best.status})${best.bodyPreview ? `: ${best.bodyPreview}` : ''}${suffixHint}`, + resolvedBaseUrl, + } + } + + return { + success: false, + message: `连接测试失败: ${best.error ?? 
'未知错误'}${suffixHint}`, + resolvedBaseUrl, + } +} + /** * 测试 Google Generative AI API 连接 */ @@ -359,14 +433,16 @@ export async function testChannelDirect(input: FetchModelsInput): Promise { - const url = normalizeBaseUrl(baseUrl) +async function fetchOpenAICompatibleModels( + baseUrl: string, + apiKey: string, + proxyUrl?: string, + ensureV1: boolean = false, +): Promise { + const url = ensureV1 ? normalizeOpenAIBaseUrl(baseUrl) : normalizeBaseUrl(baseUrl) const fetchFn = getFetchFn(proxyUrl) const response = await fetchFn(`${url}/models`, { @@ -521,6 +604,50 @@ async function fetchOpenAICompatibleModels(baseUrl: string, apiKey: string, prox } } +/** + * 从 OpenAI 兼容服务拉取模型列表(Custom:自动探测是否需要 /v1) + * + * 对于第三方 OpenAI 兼容服务,用户可能输入: + * - https://host + * - https://host/v1 + * + * 这里复用探测逻辑,优先选择更合适的 Base URL,再发起 /models 请求, + * 以提升“未先点测试连接就直接拉取模型”的成功率。 + */ +async function fetchCustomOpenAICompatibleModels( + baseUrl: string, + apiKey: string, + proxyUrl?: string, +): Promise { + const fetchFn = getFetchFn(proxyUrl) + const baseNoV1 = normalizeBaseUrl(baseUrl) + const baseV1 = normalizeOpenAIBaseUrl(baseUrl) + + const { best, probes, resolvedBaseUrl } = await probeOpenAICompatibleModelsBaseUrl({ + baseUrl, + apiKey, + fetchFn, + }) + + const suffixHint = resolvedBaseUrl === baseV1 && baseV1 !== baseNoV1 + ? 
'(已自动补全 /v1)' + : '' + + if (best.status === 404 && probes.every((p) => p.status === 404)) { + return { + success: false, + message: `请求失败 (404): 未找到 /models 或 /v1/models,请检查 Base URL 是否正确${suffixHint}`, + models: [], + } + } + + const result = await fetchOpenAICompatibleModels(resolvedBaseUrl, apiKey, proxyUrl, false) + return { + ...result, + message: `${result.message}${suffixHint}`, + } +} + /** * Google Generative AI 模型响应项 */ diff --git a/apps/electron/src/main/lib/chat-service.ts b/apps/electron/src/main/lib/chat-service.ts index 6fe592d..bf89f4f 100644 --- a/apps/electron/src/main/lib/chat-service.ts +++ b/apps/electron/src/main/lib/chat-service.ts @@ -15,11 +15,12 @@ import { randomUUID } from 'node:crypto' import type { WebContents } from 'electron' import { CHAT_IPC_CHANNELS } from '@proma/shared' -import type { ChatSendInput, ChatMessage, GenerateTitleInput, FileAttachment, ChatToolActivity } from '@proma/shared' +import type { ChatSendInput, ChatMessage, GenerateTitleInput, FileAttachment, ChatToolActivity, ChannelApiFormat } from '@proma/shared' import { getAdapter, streamSSE, fetchTitle, + normalizeOpenAIBaseUrl, } from '@proma/core' import type { ImageAttachmentData, ContinuationMessage } from '@proma/core' import { listChannels, decryptApiKey } from './channel-manager' @@ -268,9 +269,18 @@ export async function sendMessage( // 9. 工具续接循环 let continuationMessages: ContinuationMessage[] = [] + let previousResponseId: string | undefined let round = 0 /** 标记最近一轮是否执行了工具(用于判断是否需要最终响应轮) */ let pendingToolResults = false + let apiFormatForLoop: ChannelApiFormat = + channel.provider === 'openai' || channel.provider === 'custom' + ? (channel.apiFormat ?? 'chat_completions') + : 'chat_completions' + const effectiveBaseUrl = + channel.provider === 'openai' + ? 
normalizeOpenAIBaseUrl(channel.baseUrl) + : channel.baseUrl /** 流式事件处理器(工具轮和最终响应轮复用) */ const handleStreamEvent = (event: { type: string; delta?: string; toolCallId?: string; toolName?: string }): void => { @@ -309,9 +319,11 @@ export async function sendMessage( pendingToolResults = false const request = adapter.buildStreamRequest({ - baseUrl: channel.baseUrl, + baseUrl: effectiveBaseUrl, apiKey, modelId, + apiFormat: apiFormatForLoop, + previousResponseId, history: enrichedHistory, userMessage: enrichedUserMessage, systemMessage: effectiveSystemMessage, @@ -322,7 +334,7 @@ export async function sendMessage( continuationMessages: continuationMessages.length > 0 ? continuationMessages : undefined, }) - const { content, toolCalls, stopReason } = await streamSSE({ + const { content, toolCalls, stopReason, responseId } = await streamSSE({ request, adapter, signal: controller.signal, @@ -330,6 +342,24 @@ export async function sendMessage( onEvent: handleStreamEvent, }) + if (responseId) { + previousResponseId = responseId + } + + // Responses 模式下,如果需要继续 tool loop 但没拿到本轮 responseId, + // 下一轮无法可靠地通过 previous_response_id 续接。 + // 为保证可用性,这里自动降级到 Chat Completions 继续循环。 + if ( + apiFormatForLoop === 'responses' + && stopReason === 'tool_use' + && toolCalls.length > 0 + && !responseId + ) { + console.warn('[聊天服务] Responses 模式未收到 responseId,自动降级为 Chat Completions 继续 tool loop') + apiFormatForLoop = 'chat_completions' + previousResponseId = undefined + } + // 如果没有工具调用或不是 tool_use 停止,退出循环 if (!toolCalls || toolCalls.length === 0 || stopReason !== 'tool_use') { break @@ -372,9 +402,11 @@ export async function sendMessage( console.log(`[聊天服务] 工具轮次已达上限 (${MAX_TOOL_ROUNDS}),发起最终响应轮`) const finalRequest = adapter.buildStreamRequest({ - baseUrl: channel.baseUrl, + baseUrl: effectiveBaseUrl, apiKey, modelId, + apiFormat: apiFormatForLoop, + previousResponseId, history: enrichedHistory, userMessage: enrichedUserMessage, systemMessage: effectiveSystemMessage, @@ -549,9 +581,16 @@ export 
async function generateTitle(input: GenerateTitleInput): Promise = { custom: '/chat/completions', } +/** OpenAI 兼容供应商的 API 格式选项 */ +const OPENAI_API_FORMAT_OPTIONS: Array<{ value: ChannelApiFormat; label: string }> = [ + { value: 'chat_completions', label: 'Chat Completions(默认)' }, + { value: 'responses', label: 'Responses(OpenAI)' }, +] + /** * 生成 API 端点预览 URL * * Anthropic 特殊处理:如果 baseUrl 已包含 /v1,则不重复添加。 */ -function buildPreviewUrl(baseUrl: string, provider: ProviderType): string { +function buildPreviewUrl(baseUrl: string, provider: ProviderType, apiFormat: ChannelApiFormat): string { let trimmed = baseUrl.trim().replace(/\/+$/, '') if (provider === 'anthropic') { @@ -94,6 +102,16 @@ function buildPreviewUrl(baseUrl: string, provider: ProviderType): string { return `${trimmed}/v1/messages` } + // OpenAI 官方:Base URL 约定包含 /v1 + if (provider === 'openai') { + trimmed = normalizeOpenAIBaseUrl(trimmed) + } + + // OpenAI / Custom:允许切换为 /responses + if ((provider === 'openai' || provider === 'custom') && apiFormat === 'responses') { + return `${trimmed}/responses` + } + return `${trimmed}${PROVIDER_CHAT_PATHS[provider]}` } @@ -104,6 +122,7 @@ export function ChannelForm({ channel, onSaved, onCancel }: ChannelFormProps): R const [name, setName] = React.useState(channel?.name ?? '') const [provider, setProvider] = React.useState(channel?.provider ?? 'anthropic') const [baseUrl, setBaseUrl] = React.useState(channel?.baseUrl ?? PROVIDER_DEFAULT_URLS.anthropic) + const [apiFormat, setApiFormat] = React.useState(channel?.apiFormat ?? 'chat_completions') const [apiKey, setApiKey] = React.useState('') const [showApiKey, setShowApiKey] = React.useState(false) const [models, setModels] = React.useState(channel?.models ?? 
[]) @@ -143,6 +162,14 @@ export function ChannelForm({ channel, onSaved, onCancel }: ChannelFormProps): R setProvider(p) setBaseUrl(PROVIDER_DEFAULT_URLS[p]) setTestResult(null) + // 默认行为: + // - OpenAI:优先使用 Responses API + // - OpenAI 兼容格式 / 其他:默认使用 Chat Completions + if (p === 'openai') { + setApiFormat('responses') + } else { + setApiFormat('chat_completions') + } } /** 添加模型 */ @@ -219,6 +246,10 @@ export function ChannelForm({ channel, onSaved, onCancel }: ChannelFormProps): R apiKey, }) setTestResult(result) + // OpenAI 兼容格式:连接测试时可能会探测并返回更合适的 Base URL(例如自动补全 /v1) + if (provider === 'custom' && result.resolvedBaseUrl && result.resolvedBaseUrl !== baseUrl.trim()) { + setBaseUrl(result.resolvedBaseUrl) + } } catch (error) { setTestResult({ success: false, message: '测试请求失败' }) } finally { @@ -228,11 +259,17 @@ export function ChannelForm({ channel, onSaved, onCancel }: ChannelFormProps): R /** 保存渠道 */ const saveChannel = async (): Promise => { + const effectiveApiFormat: ChannelApiFormat = + provider === 'openai' || provider === 'custom' + ? apiFormat + : 'chat_completions' + if (isEdit && channel) { await window.electronAPI.updateChannel(channel.id, { name, provider, baseUrl, + apiFormat: effectiveApiFormat, apiKey: apiKey || undefined, models, enabled, @@ -242,6 +279,7 @@ export function ChannelForm({ channel, onSaved, onCancel }: ChannelFormProps): R name, provider, baseUrl, + apiFormat: effectiveApiFormat, apiKey, models, enabled, @@ -254,7 +292,7 @@ export function ChannelForm({ channel, onSaved, onCancel }: ChannelFormProps): R const handleSubmit = async (e: React.FormEvent): Promise => { e.preventDefault() - if (!name.trim() || !apiKey.trim()) return + if (!name.trim() || (!isEdit && !apiKey.trim())) return setSaving(true) try { @@ -332,8 +370,17 @@ export function ChannelForm({ channel, onSaved, onCancel }: ChannelFormProps): R value={baseUrl} onChange={setBaseUrl} placeholder="https://api.example.com" - description={baseUrl.trim() ? 
`预览:${buildPreviewUrl(baseUrl, provider)}` : undefined} + description={baseUrl.trim() ? `预览:${buildPreviewUrl(baseUrl, provider, apiFormat)}` : undefined} /> + {(provider === 'openai' || provider === 'custom') && ( + setApiFormat(v as ChannelApiFormat)} + options={OPENAI_API_FORMAT_OPTIONS} + /> + )} {/* API Key + 测试连接同行 */}
diff --git a/bun.lock b/bun.lock index 8eef0a6..1fa0335 100644 --- a/bun.lock +++ b/bun.lock @@ -1,5 +1,6 @@ { "lockfileVersion": 1, + "configVersion": 0, "workspaces": { "": { "name": "proma", diff --git a/packages/core/src/providers/index.ts b/packages/core/src/providers/index.ts index 8c6e1d1..3860511 100644 --- a/packages/core/src/providers/index.ts +++ b/packages/core/src/providers/index.ts @@ -15,6 +15,7 @@ import { GoogleAdapter } from './google-adapter.ts' export * from './types.ts' export * from './sse-reader.ts' export * from './url-utils.ts' +export * from './openai-compat.ts' // 导出适配器类 export { AnthropicAdapter } from './anthropic-adapter.ts' diff --git a/packages/core/src/providers/openai-adapter.responses.test.ts b/packages/core/src/providers/openai-adapter.responses.test.ts new file mode 100644 index 0000000..af8a500 --- /dev/null +++ b/packages/core/src/providers/openai-adapter.responses.test.ts @@ -0,0 +1,190 @@ +import { describe, expect, test } from 'bun:test' +import type { ChatMessage, FileAttachment } from '@proma/shared' +import type { ImageAttachmentData, StreamRequestInput, ToolDefinition } from './types.ts' +import { OpenAIAdapter } from './openai-adapter.ts' + +function makeHistory(): ChatMessage[] { + const now = Date.now() + return [ + { id: 'u1', role: 'user', content: '你好', createdAt: now - 2_000 }, + { id: 'a1', role: 'assistant', content: '你好,有什么可以帮你?', createdAt: now - 1_000 }, + ] +} + +function makeImageAttachment(): FileAttachment { + return { + id: 'att_1', + filename: 'a.png', + mediaType: 'image/png', + localPath: 'c/att.png', + size: 123, + } +} + +const readImageAttachments = (attachments?: FileAttachment[]): ImageAttachmentData[] => { + if (!attachments || attachments.length === 0) return [] + return attachments.map((att) => ({ + mediaType: att.mediaType, + data: 'AAAA', + })) +} + +describe('OpenAIAdapter (Responses API)', () => { + test('Given apiFormat=responses When buildStreamRequest Then uses /responses', () => { + 
const adapter = new OpenAIAdapter() + const input: StreamRequestInput = { + baseUrl: 'https://api.openai.com/v1', + apiKey: 'k', + modelId: 'gpt-5', + apiFormat: 'responses', + history: makeHistory(), + userMessage: '请总结一下', + readImageAttachments, + } + + const req = adapter.buildStreamRequest(input) + expect(req.url.endsWith('/responses')).toBe(true) + }) + + test('Given systemMessage When buildStreamRequest Then sets instructions', () => { + const adapter = new OpenAIAdapter() + const input: StreamRequestInput = { + baseUrl: 'https://api.openai.com/v1', + apiKey: 'k', + modelId: 'gpt-5', + apiFormat: 'responses', + history: makeHistory(), + userMessage: '请总结一下', + systemMessage: '你是一个严谨的助手', + readImageAttachments, + } + + const req = adapter.buildStreamRequest(input) + const body = JSON.parse(req.body) as Record + expect(body.instructions).toBe('你是一个严谨的助手') + }) + + test('Given tools When buildStreamRequest Then tools are flat (no function nesting)', () => { + const adapter = new OpenAIAdapter() + const tools: ToolDefinition[] = [ + { + name: 'get_weather', + description: 'Get weather by city', + parameters: { + type: 'object', + properties: { + city: { type: 'string', description: 'City name' }, + }, + required: ['city'], + }, + }, + ] + + const input: StreamRequestInput = { + baseUrl: 'https://api.openai.com/v1', + apiKey: 'k', + modelId: 'gpt-5', + apiFormat: 'responses', + history: makeHistory(), + userMessage: '上海天气?', + tools, + readImageAttachments, + } + + const req = adapter.buildStreamRequest(input) + const body = JSON.parse(req.body) as Record + const bodyTools = body.tools as Array> + expect(bodyTools[0]?.type).toBe('function') + expect(bodyTools[0]?.name).toBe('get_weather') + expect(bodyTools[0]?.description).toBe('Get weather by city') + expect(bodyTools[0]?.parameters).toBeTruthy() + expect((bodyTools[0] as Record).function).toBeUndefined() + }) + + test('Given current message has image When buildStreamRequest Then includes input_image with data 
URL', () => { + const adapter = new OpenAIAdapter() + const attachments = [makeImageAttachment()] + const input: StreamRequestInput = { + baseUrl: 'https://api.openai.com/v1', + apiKey: 'k', + modelId: 'gpt-5', + apiFormat: 'responses', + history: makeHistory(), + userMessage: '这张图里是什么?', + attachments, + readImageAttachments, + } + + const req = adapter.buildStreamRequest(input) + const body = JSON.parse(req.body) as Record + const inputArr = body.input as Array> + const first = inputArr[0] as Record + const content = first.content as Array> + + const imageItem = content.find((c) => c.type === 'input_image') + expect(imageItem).toBeTruthy() + expect(typeof imageItem?.image_url).toBe('string') + expect((imageItem?.image_url as string).startsWith('data:image/png;base64,')).toBe(true) + }) + + test('Given response.output_text.delta When parseSSELine Then returns chunk', () => { + const adapter = new OpenAIAdapter() + const events = adapter.parseSSELine(JSON.stringify({ type: 'response.output_text.delta', delta: 'hi' })) + expect(events).toEqual([{ type: 'chunk', delta: 'hi' }]) + }) + + test('Given response.output_item.added function_call When parseSSELine Then returns tool_call_start', () => { + const adapter = new OpenAIAdapter() + const events = adapter.parseSSELine(JSON.stringify({ + type: 'response.output_item.added', + item: { type: 'function_call', call_id: 'call_1', name: 'my_tool' }, + })) + expect(events).toEqual([{ type: 'tool_call_start', toolCallId: 'call_1', toolName: 'my_tool' }]) + }) + + test('Given response.output_item.added with response_id When parseSSELine Then returns meta + tool_call_start', () => { + const adapter = new OpenAIAdapter() + const events = adapter.parseSSELine(JSON.stringify({ + type: 'response.output_item.added', + response_id: 'resp_1', + item: { type: 'function_call', call_id: 'call_1', name: 'my_tool' }, + })) + expect(events).toEqual([ + { type: 'meta', responseId: 'resp_1' }, + { type: 'tool_call_start', toolCallId: 
'call_1', toolName: 'my_tool' }, + ]) + }) + + test('Given response.function_call_arguments.delta When parseSSELine Then returns tool_call_delta', () => { + const adapter = new OpenAIAdapter() + const events = adapter.parseSSELine(JSON.stringify({ + type: 'response.function_call_arguments.delta', + call_id: 'call_1', + delta: '{"q":"x"}', + })) + expect(events).toEqual([{ type: 'tool_call_delta', toolCallId: 'call_1', argumentsDelta: '{"q":"x"}' }]) + }) + + test('Given response.function_call_arguments.delta with response_id When parseSSELine Then returns meta + tool_call_delta', () => { + const adapter = new OpenAIAdapter() + const events = adapter.parseSSELine(JSON.stringify({ + type: 'response.function_call_arguments.delta', + response_id: 'resp_1', + call_id: 'call_1', + delta: '{"q":"x"}', + })) + expect(events).toEqual([ + { type: 'meta', responseId: 'resp_1' }, + { type: 'tool_call_delta', toolCallId: 'call_1', argumentsDelta: '{"q":"x"}' }, + ]) + }) + + test('Given response.created When parseSSELine Then returns meta(responseId)', () => { + const adapter = new OpenAIAdapter() + const events = adapter.parseSSELine(JSON.stringify({ + type: 'response.created', + response: { id: 'resp_1' }, + })) + expect(events).toEqual([{ type: 'meta', responseId: 'resp_1' }]) + }) +}) diff --git a/packages/core/src/providers/openai-adapter.ts b/packages/core/src/providers/openai-adapter.ts index 0bc0157..71f6207 100644 --- a/packages/core/src/providers/openai-adapter.ts +++ b/packages/core/src/providers/openai-adapter.ts @@ -1,7 +1,7 @@ /** * OpenAI 兼容供应商适配器 * - * 实现 OpenAI Chat Completions API 的消息转换、请求构建和 SSE 解析。 + * 实现 OpenAI Chat Completions 与 Responses API 的消息转换、请求构建和 SSE 解析。 * 同时适用于 OpenAI、DeepSeek 和自定义 OpenAI 兼容 API。 * 特点: * - 角色:system / user / assistant / tool @@ -10,6 +10,7 @@ * - 认证:Authorization: Bearer */ +import type { ChatMessage } from '@proma/shared' import type { ProviderAdapter, ProviderRequest, @@ -67,6 +68,35 @@ interface OpenAITitleResponse { choices?: 
Array<{ message?: { content?: string } }> } +// ===== Responses API 相关类型(最小子集) ===== + +/** Responses SSE 数据块(最小字段集,按需扩展) */ +interface OpenAIResponsesChunkData { + type?: string + delta?: string + id?: string + response_id?: string + response?: { id?: string } + output_index?: number + call_id?: string + item?: { + type?: string + id?: string + call_id?: string + name?: string + arguments?: string + } +} + +/** Responses 标题响应(最小字段集) */ +interface OpenAIResponsesTitleResponse { + output_text?: string + output?: Array<{ + type?: string + content?: Array<{ type?: string; text?: string }> + }> +} + // ===== 消息转换 ===== /** @@ -135,6 +165,26 @@ function toOpenAIMessages(input: StreamRequestInput): OpenAIMessage[] { return messages } +/** + * 将历史消息 + 当前用户消息拼接为转录文本(用于 Responses API) + * + * 注意:Responses 的 message input item role 不包含 assistant, + * 这里通过单条 user message 的 input_text 承载历史上下文。 + */ +function toTranscriptText(history: ChatMessage[], userMessage: string): string { + const lines: string[] = [] + for (const msg of history) { + if (msg.role === 'system') continue + if (msg.role === 'user') { + lines.push(`User: ${msg.content}`) + } else if (msg.role === 'assistant') { + lines.push(`Assistant: ${msg.content}`) + } + } + lines.push(`User: ${userMessage}`) + return lines.join('\n\n') +} + /** * 将工具定义转换为 OpenAI 格式 */ @@ -149,6 +199,53 @@ function toOpenAITools(tools: ToolDefinition[]): Array> })) } +/** + * 将工具定义转换为 OpenAI Responses API 格式(扁平 function tool) + * + * 参考:/responses/create 中 tools 示例为 { type:'function', name, description, parameters, strict }。 + */ +function toOpenAIResponsesTools(tools: ToolDefinition[]): Array> { + return tools.map((tool) => ({ + type: 'function', + name: tool.name, + description: tool.description, + parameters: tool.parameters, + })) +} + +/** + * 从 continuationMessages 中提取最近一次的工具结果(避免重复发送历史 tool outputs) + */ +function getLatestToolResults( + continuationMessages: ContinuationMessage[] | undefined, +): Array<{ toolCallId: string; 
content: string; isError?: boolean }> | null { + if (!continuationMessages || continuationMessages.length === 0) return null + for (let i = continuationMessages.length - 1; i >= 0; i--) { + const msg = continuationMessages[i]! + if (msg.role === 'tool') { + return msg.results + } + } + return null +} + +/** + * 构建 Responses API 的 input message content 列表(input_text + input_image) + */ +function buildResponsesMessageContent( + text: string, + imageData: ImageAttachmentData[], +): Array> { + const content: Array> = [{ type: 'input_text', text }] + for (const img of imageData) { + content.push({ + type: 'input_image', + image_url: `data:${img.mediaType};base64,${img.data}`, + }) + } + return content +} + /** * 将续接消息追加到 OpenAI 消息列表 */ @@ -186,6 +283,60 @@ export class OpenAIAdapter implements ProviderAdapter { buildStreamRequest(input: StreamRequestInput): ProviderRequest { const url = normalizeBaseUrl(input.baseUrl) + + // ===== Responses API ===== + if (input.apiFormat === 'responses') { + const latestToolResults = getLatestToolResults(input.continuationMessages) + + const bodyObj: Record = { + model: input.modelId, + stream: true, + } + + // systemMessage 映射到 instructions(保持每轮一致,避免依赖服务端链式继承) + if (input.systemMessage) { + bodyObj.instructions = input.systemMessage + } + + // 工具定义(Responses: 扁平 function tool) + if (input.tools && input.tools.length > 0) { + bodyObj.tools = toOpenAIResponsesTools(input.tools) + } + + // tool loop:仅发送最近一次 tool outputs,并通过 previous_response_id 续接 + if (latestToolResults && latestToolResults.length > 0) { + if (input.previousResponseId) { + bodyObj.previous_response_id = input.previousResponseId + } + bodyObj.input = latestToolResults.map((tr) => ({ + type: 'function_call_output', + call_id: tr.toolCallId, + output: tr.content, + })) + } else { + // 首轮:发送转录文本 + 当前消息图片(仅当前轮图片附件) + const transcript = toTranscriptText(input.history, input.userMessage) + const currentImages = input.readImageAttachments(input.attachments) + bodyObj.input = [ 
+ { + type: 'message', + role: 'user', + content: buildResponsesMessageContent(transcript, currentImages), + }, + ] + } + + return { + url: `${url}/responses`, + headers: { + Authorization: `Bearer ${input.apiKey}`, + 'content-type': 'application/json', + }, + body: JSON.stringify(bodyObj), + } + } + + // ===== Chat Completions(兼容) ===== const messages = toOpenAIMessages(input) const bodyObj: Record = { @@ -216,7 +367,78 @@ export class OpenAIAdapter implements ProviderAdapter { parseSSELine(jsonLine: string): StreamEvent[] { try { - const chunk = JSON.parse(jsonLine) as OpenAIChunkData + const parsed = JSON.parse(jsonLine) as OpenAIChunkData & OpenAIResponsesChunkData + const eventType = typeof parsed.type === 'string' ? parsed.type : null + + // ===== Responses SSE 事件 ===== + if (eventType && eventType.startsWith('response.')) { + const maybeResponseId = + parsed.response?.id + || parsed.response_id + || parsed.id + const events: StreamEvent[] = [] + + if (eventType === 'response.created') { + const responseId = + parsed.response?.id + || parsed.response_id + || parsed.id + if (responseId) { + events.push({ type: 'meta', responseId }) + } + return events + } + + if (eventType === 'response.output_text.delta' && parsed.delta) { + events.push({ type: 'chunk', delta: parsed.delta }) + return events + } + + if (eventType === 'response.output_item.added' && parsed.item?.type === 'function_call') { + const toolCallId = parsed.item.call_id || parsed.item.id || `tc_${String(parsed.output_index ?? 
0)}` + const toolName = parsed.item.name || 'unknown_tool' + + // 兼容实现:有些服务可能不发 response.created,而是在其它事件里携带 response_id + if (maybeResponseId) { + events.push({ type: 'meta', responseId: maybeResponseId }) + } + + events.push({ + type: 'tool_call_start', + toolCallId, + toolName, + }) + + // 某些实现可能在 added 事件中直接携带完整 arguments + if (parsed.item.arguments) { + events.push({ + type: 'tool_call_delta', + toolCallId, + argumentsDelta: parsed.item.arguments, + }) + } + + return events + } + + if (eventType === 'response.function_call_arguments.delta' && parsed.delta) { + // 兼容实现:有些服务可能不发 response.created,而是在其它事件里携带 response_id + if (maybeResponseId) { + events.push({ type: 'meta', responseId: maybeResponseId }) + } + events.push({ + type: 'tool_call_delta', + toolCallId: parsed.call_id || parsed.item?.call_id || '', + argumentsDelta: parsed.delta, + }) + return events + } + + return [] + } + + // ===== Chat Completions SSE 数据 ===== + const chunk = parsed as OpenAIChunkData const delta = chunk.choices?.[0]?.delta const events: StreamEvent[] = [] @@ -266,6 +488,22 @@ export class OpenAIAdapter implements ProviderAdapter { buildTitleRequest(input: TitleRequestInput): ProviderRequest { const url = normalizeBaseUrl(input.baseUrl) + // Responses API:非流式生成标题 + if (input.apiFormat === 'responses') { + return { + url: `${url}/responses`, + headers: { + Authorization: `Bearer ${input.apiKey}`, + 'content-type': 'application/json', + }, + body: JSON.stringify({ + model: input.modelId, + input: input.prompt, + max_output_tokens: 60, + }), + } + } + return { url: `${url}/chat/completions`, headers: { @@ -281,6 +519,20 @@ export class OpenAIAdapter implements ProviderAdapter { } parseTitleResponse(responseBody: unknown): string | null { + const maybeResponses = responseBody as OpenAIResponsesTitleResponse + if (typeof maybeResponses.output_text === 'string') { + return maybeResponses.output_text + } + + // fallback:从 output message 中提取第一段 output_text + const firstText = 
maybeResponses.output + ?.find((it) => it.type === 'message') + ?.content?.find((c) => c.type === 'output_text') + ?.text + if (typeof firstText === 'string') { + return firstText + } + const data = responseBody as OpenAITitleResponse return data.choices?.[0]?.message?.content ?? null } diff --git a/packages/core/src/providers/openai-compat.test.ts b/packages/core/src/providers/openai-compat.test.ts new file mode 100644 index 0000000..7749b40 --- /dev/null +++ b/packages/core/src/providers/openai-compat.test.ts @@ -0,0 +1,77 @@ +import { describe, expect, test } from 'bun:test' +import { probeOpenAICompatibleModelsBaseUrl } from './openai-compat.ts' + +function makeFetch(map: Record): typeof fetch { + return (async (input: RequestInfo | URL) => { + const url = typeof input === 'string' ? input : input.toString() + const hit = map[url] + if (hit === 'throw') { + throw new Error('network error') + } + const status = typeof hit === 'number' ? hit : 404 + const body = status === 200 ? '{"data":[]}' : `status ${status}` + return new Response(body, { status }) + }) as unknown as typeof fetch +} + +describe('probeOpenAICompatibleModelsBaseUrl', () => { + test('Given /models ok and /v1/models 404 When probe Then chooses non-v1 baseUrl', async () => { + const baseUrl = 'https://api.example.com' + const fetchFn = makeFetch({ + 'https://api.example.com/models': 200, + 'https://api.example.com/v1/models': 404, + }) + + const result = await probeOpenAICompatibleModelsBaseUrl({ baseUrl, apiKey: 'k', fetchFn }) + expect(result.resolvedBaseUrl).toBe('https://api.example.com') + expect(result.best.ok).toBe(true) + }) + + test('Given /models 404 and /v1/models ok When probe Then chooses /v1 baseUrl', async () => { + const baseUrl = 'https://api.example.com' + const fetchFn = makeFetch({ + 'https://api.example.com/models': 404, + 'https://api.example.com/v1/models': 200, + }) + + const result = await probeOpenAICompatibleModelsBaseUrl({ baseUrl, apiKey: 'k', fetchFn }) + 
expect(result.resolvedBaseUrl).toBe('https://api.example.com/v1') + expect(result.best.ok).toBe(true) + }) + + test('Given both endpoints 401 When probe Then prefers /v1 baseUrl', async () => { + const baseUrl = 'https://api.example.com' + const fetchFn = makeFetch({ + 'https://api.example.com/models': 401, + 'https://api.example.com/v1/models': 401, + }) + + const result = await probeOpenAICompatibleModelsBaseUrl({ baseUrl, apiKey: 'bad', fetchFn }) + expect(result.resolvedBaseUrl).toBe('https://api.example.com/v1') + expect(result.best.status).toBe(401) + }) + + test('Given both endpoints 403 When probe Then prefers /v1 baseUrl', async () => { + const baseUrl = 'https://api.example.com' + const fetchFn = makeFetch({ + 'https://api.example.com/models': 403, + 'https://api.example.com/v1/models': 403, + }) + + const result = await probeOpenAICompatibleModelsBaseUrl({ baseUrl, apiKey: 'bad', fetchFn }) + expect(result.resolvedBaseUrl).toBe('https://api.example.com/v1') + expect(result.best.status).toBe(403) + }) + + test('Given /models throws and /v1/models 404 When probe Then chooses /v1 baseUrl (less bad)', async () => { + const baseUrl = 'https://api.example.com' + const fetchFn = makeFetch({ + 'https://api.example.com/models': 'throw', + 'https://api.example.com/v1/models': 404, + }) + + const result = await probeOpenAICompatibleModelsBaseUrl({ baseUrl, apiKey: 'k', fetchFn }) + expect(result.resolvedBaseUrl).toBe('https://api.example.com/v1') + expect(result.probes).toHaveLength(2) + }) +}) diff --git a/packages/core/src/providers/openai-compat.ts b/packages/core/src/providers/openai-compat.ts new file mode 100644 index 0000000..aa2a4bd --- /dev/null +++ b/packages/core/src/providers/openai-compat.ts @@ -0,0 +1,110 @@ +/** + * OpenAI 兼容服务探测工具 + * + * 主要用于第三方 OpenAI 兼容服务的 Base URL 兼容性探测: + * - 有的服务以 https://host/v1 作为根路径(OpenAI 官方) + * - 有的服务以 https://host 作为根路径(不带 /v1) + * + * 这里提供一个轻量探测:并发请求 /models 与 /v1/models,通过结果选择更合适的 Base URL。 + * 
仅用于“连接测试/辅助回填”,不影响实际请求构建逻辑。 + */ + +import { normalizeBaseUrl, normalizeOpenAIBaseUrl } from './url-utils.ts' + +/** /models 探测结果(最小字段集) */ +export interface OpenAIModelsProbe { + baseUrl: string + ok: boolean + status: number + bodyPreview?: string + error?: string +} + +/** 探测汇总结果 */ +export interface OpenAIModelsProbeResult { + /** 探测到的候选 baseUrl(去重后) */ + probes: OpenAIModelsProbe[] + /** 最优探测结果 */ + best: OpenAIModelsProbe + /** 推荐使用的 baseUrl */ + resolvedBaseUrl: string +} + +export function scoreOpenAIModelsProbe(probe: OpenAIModelsProbe): number { + if (probe.ok) return 3 + // 401/403:鉴权失败但 endpoint 存在,可用于判断 baseUrl 是否正确 + if (probe.status === 401 || probe.status === 403) return 2 + if (probe.status === 404) return 0 + if (probe.status > 0) return 1 + return -1 +} + +export function chooseBestOpenAIModelsProbe( + probes: OpenAIModelsProbe[], + preferBaseUrl?: string, +): OpenAIModelsProbe { + if (probes.length === 0) { + throw new Error('No probes provided') + } + + return probes.reduce((prev, cur) => { + const sPrev = scoreOpenAIModelsProbe(prev) + const sCur = scoreOpenAIModelsProbe(cur) + if (sCur !== sPrev) return sCur > sPrev ? cur : prev + if (preferBaseUrl && cur.baseUrl === preferBaseUrl && prev.baseUrl !== preferBaseUrl) return cur + return prev + }) +} + +async function probeOpenAIModels( + baseUrl: string, + apiKey: string, + fetchFn: typeof globalThis.fetch, +): Promise<OpenAIModelsProbe> { + try { + const response = await fetchFn(`${baseUrl}/models`, { + method: 'GET', + headers: { + Authorization: `Bearer ${apiKey}`, + }, + }) + + const bodyPreview = response.ok + ? undefined + : (await response.text().catch(() => '')).slice(0, 200) + + return { baseUrl, ok: response.ok, status: response.status, bodyPreview } + } catch (error) { + const msg = error instanceof Error ? 
error.message : '未知错误' + return { baseUrl, ok: false, status: -1, error: msg } + } +} + +/** + * 探测 OpenAI 兼容服务 Base URL(/models 与 /v1/models) + * + * @param baseUrl 用户输入的 Base URL(可能带/不带 /v1) + * @param apiKey API Key(用于鉴权,401 也能用来判断 endpoint 存在) + * @param fetchFn 可注入 fetch(默认使用全局 fetch) + */ +export async function probeOpenAICompatibleModelsBaseUrl(options: { + baseUrl: string + apiKey: string + fetchFn?: typeof globalThis.fetch +}): Promise<OpenAIModelsProbeResult> { + const { baseUrl, apiKey, fetchFn = fetch } = options + const baseNoV1 = normalizeBaseUrl(baseUrl) + const baseV1 = normalizeOpenAIBaseUrl(baseUrl) + + const candidates = Array.from(new Set([baseNoV1, baseV1])) + const probes = await Promise.all(candidates.map((b) => probeOpenAIModels(b, apiKey, fetchFn))) + + // 同分时优先 /v1(baseV1) + const best = chooseBestOpenAIModelsProbe(probes, baseV1) + + return { + probes, + best, + resolvedBaseUrl: best.baseUrl, + } +} diff --git a/packages/core/src/providers/sse-reader.responses.test.ts b/packages/core/src/providers/sse-reader.responses.test.ts new file mode 100644 index 0000000..5f01c9f --- /dev/null +++ b/packages/core/src/providers/sse-reader.responses.test.ts @@ -0,0 +1,96 @@ +import { describe, expect, test } from 'bun:test' +import { streamSSE } from './sse-reader.ts' +import { OpenAIAdapter } from './openai-adapter.ts' + +function makeSSEStream(chunks: string[]): ReadableStream { + const encoder = new TextEncoder() + return new ReadableStream({ + start(controller) { + for (const chunk of chunks) { + controller.enqueue(encoder.encode(chunk)) + } + controller.close() + }, + }) +} + +describe('streamSSE (Responses API aggregation)', () => { + test('Given Responses SSE with responseId/content/toolCalls When streamSSE Then aggregates responseId/toolCalls/stopReason', async () => { + const adapter = new OpenAIAdapter() + + const sseLines = [ + `data: ${JSON.stringify({ type: 'response.created', response: { id: 'resp_123' } })}\n\n`, + `data: ${JSON.stringify({ type: 
'response.output_text.delta', delta: 'Hello' })}\n`, + `data: ${JSON.stringify({ type: 'response.output_text.delta', delta: ' world' })}\n\n`, + `data: ${JSON.stringify({ + type: 'response.output_item.added', + item: { type: 'function_call', call_id: 'call_1', name: 'my_tool' }, + })}\n`, + `data: ${JSON.stringify({ + type: 'response.function_call_arguments.delta', + call_id: 'call_1', + delta: '{"q":"x"}', + })}\n\n`, + `data: [DONE]\n\n`, + ] + + const response = new Response(makeSSEStream([sseLines.join('')]), { status: 200 }) + const fetchFn: typeof fetch = async (_input, _init) => response + + const events: Array<{ type: string }> = [] + const result = await streamSSE({ + request: { url: 'https://example.com/responses', headers: {}, body: '{}' }, + adapter, + fetchFn, + onEvent: (e) => events.push({ type: e.type }), + }) + + expect(result.responseId).toBe('resp_123') + expect(result.content).toBe('Hello world') + expect(result.toolCalls).toHaveLength(1) + expect(result.toolCalls[0]?.id).toBe('call_1') + expect(result.toolCalls[0]?.name).toBe('my_tool') + expect(result.toolCalls[0]?.arguments).toEqual({ q: 'x' }) + expect(result.stopReason).toBe('tool_use') + + // 保底:done 事件必然触发(无论服务端是否显式给出 stopReason) + expect(events.some((e) => e.type === 'done')).toBe(true) + }) + + test('Given adapter emits done When streamSSE Then forwards done only once', async () => { + const adapter = new OpenAIAdapter() + + // Chat Completions:finish_reason=tool_calls 会被 adapter 解析为 done(tool_use) + const sseLines = [ + `data: ${JSON.stringify({ + choices: [ + { + delta: { + tool_calls: [ + { index: 0, id: 'call_1', function: { name: 'my_tool', arguments: '{"q":"x"}' } }, + ], + }, + finish_reason: 'tool_calls', + }, + ], + })}\n\n`, + `data: [DONE]\n\n`, + ] + + const response = new Response(makeSSEStream([sseLines.join('')]), { status: 200 }) + const fetchFn: typeof fetch = async (_input, _init) => response + + const events: Array<{ type: string }> = [] + const result = await 
streamSSE({ + request: { url: 'https://example.com/chat/completions', headers: {}, body: '{}' }, + adapter, + fetchFn, + onEvent: (e) => events.push({ type: e.type }), + }) + + const doneCount = events.filter((e) => e.type === 'done').length + expect(doneCount).toBe(1) + expect(result.stopReason).toBe('tool_use') + expect(result.toolCalls[0]?.id).toBe('call_1') + }) +}) diff --git a/packages/core/src/providers/sse-reader.ts b/packages/core/src/providers/sse-reader.ts index 0bbb7ac..4c9cf8f 100644 --- a/packages/core/src/providers/sse-reader.ts +++ b/packages/core/src/providers/sse-reader.ts @@ -38,6 +38,8 @@ export interface StreamSSEResult { toolCalls: ToolCall[] /** 停止原因('tool_use' 表示需要执行工具后继续) */ stopReason?: string + /** Responses API 的 response.id(如有) */ + responseId?: string } /** @@ -77,6 +79,7 @@ export async function streamSSE(options: StreamSSEOptions): Promise { + test('Given baseUrl without version When normalize Then appends /v1', () => { + expect(normalizeOpenAIBaseUrl('https://api.openai.com')).toBe('https://api.openai.com/v1') + }) + + test('Given baseUrl already endsWith /v1 When normalize Then keeps /v1', () => { + expect(normalizeOpenAIBaseUrl('https://api.openai.com/v1')).toBe('https://api.openai.com/v1') + }) + + test('Given baseUrl endsWith endpoint suffix When normalize Then strips suffix and ensures version', () => { + expect(normalizeOpenAIBaseUrl('https://example.com/v1/chat/completions')).toBe('https://example.com/v1') + expect(normalizeOpenAIBaseUrl('https://example.com/v1/responses')).toBe('https://example.com/v1') + expect(normalizeOpenAIBaseUrl('https://example.com/v1/models')).toBe('https://example.com/v1') + }) +}) + diff --git a/packages/core/src/providers/url-utils.ts b/packages/core/src/providers/url-utils.ts index e32d690..b2150ea 100644 --- a/packages/core/src/providers/url-utils.ts +++ b/packages/core/src/providers/url-utils.ts @@ -56,3 +56,25 @@ export function normalizeAnthropicBaseUrlForSdk(baseUrl: string): string { export 
function normalizeBaseUrl(baseUrl: string): string { return baseUrl.trim().replace(/\/+$/, '') } + +/** + * 规范化 OpenAI Base URL + * + * OpenAI 官方 API 约定 Base URL 末尾包含版本号(通常为 /v1)。 + * 这里在不改变用户输入语义的前提下做容错: + * - 去除尾部斜杠 + * - 去除误填的终端路径(/chat/completions、/responses、/models) + * - 若末尾不含 /v{n},则自动追加 /v1 + */ +export function normalizeOpenAIBaseUrl(baseUrl: string): string { + let url = normalizeBaseUrl(baseUrl) + url = url + .replace(/\/chat\/completions$/, '') + .replace(/\/responses$/, '') + .replace(/\/models$/, '') + + if (!url.match(/\/v\d+$/)) { + url = `${url}/v1` + } + return url +} diff --git a/packages/core/tsconfig.json b/packages/core/tsconfig.json index 22d81b9..9ef6e06 100644 --- a/packages/core/tsconfig.json +++ b/packages/core/tsconfig.json @@ -11,5 +11,6 @@ "noEmit": true, "types": [] }, - "include": ["src/**/*"] + "include": ["src/**/*"], + "exclude": ["src/**/*.test.ts", "src/**/*.spec.ts"] } diff --git a/packages/shared/src/types/channel.ts b/packages/shared/src/types/channel.ts index f522058..2319d06 100644 --- a/packages/shared/src/types/channel.ts +++ b/packages/shared/src/types/channel.ts @@ -20,6 +20,14 @@ export type ProviderType = | 'qwen' | 'custom' +/** + * OpenAI 兼容供应商的 API 格式 + * + * - chat_completions: 兼容 /chat/completions(默认) + * - responses: OpenAI /responses 格式(仅对 OpenAI / 自定义渠道开放) + */ +export type ChannelApiFormat = 'chat_completions' | 'responses' + /** * 各供应商的默认 Base URL */ @@ -78,6 +86,8 @@ export interface Channel { provider: ProviderType /** API Base URL */ baseUrl: string + /** OpenAI 兼容供应商的 API 格式(缺省为 chat_completions) */ + apiFormat?: ChannelApiFormat /** 加密后的 API Key(base64 编码) */ apiKey: string /** 可用模型列表 */ @@ -97,6 +107,8 @@ export interface ChannelCreateInput { name: string provider: ProviderType baseUrl: string + /** OpenAI 兼容供应商的 API 格式(可选,缺省为 chat_completions) */ + apiFormat?: ChannelApiFormat /** 明文 API Key,主进程会加密后存储 */ apiKey: string models: ChannelModel[] @@ -110,6 +122,8 @@ export interface ChannelUpdateInput { 
name?: string provider?: ProviderType baseUrl?: string + /** OpenAI 兼容供应商的 API 格式(可选) */ + apiFormat?: ChannelApiFormat /** 明文 API Key,为空字符串表示不更新 */ apiKey?: string models?: ChannelModel[] @@ -134,6 +148,13 @@ export interface ChannelTestResult { success: boolean /** 结果消息 */ message: string + /** + * 探测/规范化后的 Base URL(可选) + * + * 主要用于 OpenAI 兼容服务的路径探测(例如自动判断是否需要 /v1)。 + * 调用方可据此自动回填表单 Base URL,避免用户手动修正。 + */ + resolvedBaseUrl?: string } /**