fix: 对话格式兼容改造

This commit is contained in:
Gsh
2026-01-11 00:15:31 +08:00
parent a9a9e45b7c
commit 53d70ef9d7
7 changed files with 651 additions and 64 deletions

View File

@@ -0,0 +1,492 @@
/**
* API 格式转换工具
* 支持 OpenAI Completions、OpenAI Responses、Anthropic Claude、Google Gemini 四种格式的相互转换
*/
// Enumeration of the supported upstream API wire formats.
// The string values double as the identifiers stored/compared elsewhere
// (see parseStreamChunk / convertToApiFormat, which switch on these).
export enum ApiFormatType {
Completions = 'Completions', // OpenAI Chat Completions API
Responses = 'Responses', // OpenAI Responses API
Messages = 'Messages', // Anthropic Claude Messages API
GenerateContent = 'GenerateContent', // Google Gemini generateContent API
}
// Unified internal message shape that all four provider formats convert to/from.
export interface UnifiedMessage {
// 'model' is the Gemini spelling of the assistant role; the converters normalize it.
role: 'system' | 'user' | 'assistant' | 'model';
// Either a plain string or a list of text/image content blocks.
content: string | MessageContent[];
}
// A single content block within a multi-part message.
export interface MessageContent {
type: 'text' | 'image_url';
// Present when type === 'text'.
text?: string;
// Present when type === 'image_url'. The converters assume `url` is a
// base64 data: URL (they strip the "data:image/...;base64," prefix).
image_url?: {
url: string;
name?: string;
};
}
// OpenAI Chat Completions request shapes.
export interface CompletionsMessage {
role: 'system' | 'user' | 'assistant';
content: string | MessageContent[];
}
// Request body for the Chat Completions endpoint.
export interface CompletionsRequest {
model: string;
messages: CompletionsMessage[];
stream?: boolean;
max_tokens?: number;
}
// OpenAI Responses API request shapes.
export interface ResponsesMessage {
role: 'system' | 'user' | 'assistant';
content: string | MessageContent[];
}
// Request body for the Responses endpoint; messages travel in `input`
// rather than `messages`.
export interface ResponsesRequest {
model: string;
input: ResponsesMessage[];
stream?: boolean;
}
// Anthropic Claude Messages API request shapes.
export interface ClaudeMessage {
// Claude has no 'system' role; the system prompt lives in ClaudeRequest.system.
role: 'user' | 'assistant';
content: string | ClaudeContent[];
}
// One Claude content block: either text or a base64-encoded image.
export interface ClaudeContent {
type: 'text' | 'image';
// Present when type === 'text'.
text?: string;
// Present when type === 'image'.
source?: {
type: 'base64';
media_type: string;
data: string;
};
}
export interface ClaudeRequest {
model: string;
messages: ClaudeMessage[];
// Non-optional: required by the Claude API (unlike CompletionsRequest above).
max_tokens: number;
stream?: boolean;
// System prompt extracted from the message list (see toClaudeFormat).
system?: string;
}
// Google Gemini generateContent request shapes.
export interface GeminiPart {
// Present for text parts.
text?: string;
// Present for inline binary parts (images), base64-encoded.
inlineData?: {
mimeType: string;
data: string;
};
}
export interface GeminiContent {
// Gemini uses 'model' where the other APIs use 'assistant'.
role: 'user' | 'model';
parts: GeminiPart[];
}
export interface GeminiRequest {
contents: GeminiContent[];
generationConfig?: {
maxOutputTokens?: number;
};
}
/**
 * Convert unified messages to the OpenAI Chat Completions message format.
 * The only mapping needed is renaming the Gemini-style 'model' role to
 * 'assistant'; content blocks pass through unchanged.
 */
export function toCompletionsFormat(messages: UnifiedMessage[]): CompletionsMessage[] {
  const converted: CompletionsMessage[] = [];
  for (const message of messages) {
    const normalizedRole = message.role === 'model' ? 'assistant' : message.role;
    converted.push({
      role: normalizedRole as 'system' | 'user' | 'assistant',
      content: message.content,
    });
  }
  return converted;
}
/**
 * Convert unified messages to the OpenAI Responses API input format.
 * Identical mapping to the Completions conversion: only the Gemini-style
 * 'model' role is renamed to 'assistant'.
 */
export function toResponsesFormat(messages: UnifiedMessage[]): ResponsesMessage[] {
  return messages.map(({ role, content }) => ({
    role: (role === 'model' ? 'assistant' : role) as 'system' | 'user' | 'assistant',
    content,
  }));
}
/**
* 将统一格式的消息转换为 Anthropic Claude 格式
*/
export function toClaudeFormat(messages: UnifiedMessage[]): { messages: ClaudeMessage[]; system?: string } {
let systemPrompt: string | undefined;
const claudeMessages: ClaudeMessage[] = [];
for (const msg of messages) {
// Claude 的 system 消息需要单独提取
if (msg.role === 'system') {
systemPrompt = typeof msg.content === 'string' ? msg.content : msg.content.map(c => c.text || '').join('');
continue;
}
const role = msg.role === 'model' ? 'assistant' : msg.role;
// 处理内容格式
let content: string | ClaudeContent[];
if (typeof msg.content === 'string') {
content = msg.content;
}
else {
content = msg.content.map((item) => {
if (item.type === 'text') {
return { type: 'text', text: item.text || '' };
}
else if (item.type === 'image_url' && item.image_url) {
// 将 base64 图片转换为 Claude 格式
const base64Data = item.image_url.url.replace(/^data:image\/\w+;base64,/, '');
const mimeType = item.image_url.url.match(/^data:(image\/\w+);base64,/)?.[1] || 'image/jpeg';
return {
type: 'image',
source: {
type: 'base64',
media_type: mimeType,
data: base64Data,
},
};
}
return { type: 'text', text: '' };
});
}
claudeMessages.push({
role: role as 'user' | 'assistant',
content,
});
}
return { messages: claudeMessages, system: systemPrompt };
}
/**
 * Convert unified messages to the Google Gemini `contents` format.
 *
 * Gemini has no 'system' role, so system messages are dropped here
 * (NOTE(review): if system prompts matter, they should be sent via the
 * API's systemInstruction field — confirm against the caller).
 *
 * Fix: a 'model' role is now preserved as 'model'; previously only
 * 'assistant' was checked, so 'model' messages were wrongly sent as 'user'.
 *
 * @param messages unified-format message list
 * @returns Gemini-format content list
 */
export function toGeminiFormat(messages: UnifiedMessage[]): GeminiContent[] {
  const geminiContents: GeminiContent[] = [];
  for (const msg of messages) {
    // Gemini does not accept system messages in `contents`.
    if (msg.role === 'system') {
      continue;
    }
    const role = msg.role === 'assistant' || msg.role === 'model' ? 'model' : 'user';
    const parts: GeminiPart[] = [];
    if (typeof msg.content === 'string') {
      parts.push({ text: msg.content });
    }
    else {
      for (const item of msg.content) {
        if (item.type === 'text') {
          parts.push({ text: item.text || '' });
        }
        else if (item.type === 'image_url' && item.image_url) {
          // Gemini expects inlineData with raw base64 + mime type,
          // so strip the data-URL prefix.
          const base64Data = item.image_url.url.replace(/^data:image\/\w+;base64,/, '');
          const mimeType = item.image_url.url.match(/^data:(image\/\w+);base64,/)?.[1] || 'image/jpeg';
          parts.push({
            inlineData: {
              mimeType,
              data: base64Data,
            },
          });
        }
      }
    }
    geminiContents.push({ role, parts });
  }
  return geminiContents;
}
/**
 * Unified response chunk shared by all four provider parsers
 * (used for both streaming and non-streaming responses).
 * All fields are optional because a single chunk rarely carries all of them.
 */
export interface UnifiedStreamChunk {
// Incremental answer text.
content?: string;
// Incremental reasoning / chain-of-thought text, when the provider exposes it.
reasoning_content?: string;
// Token accounting, normalized to the OpenAI field names.
usage?: {
prompt_tokens?: number;
completion_tokens?: number;
total_tokens?: number;
};
// Provider-specific stop/finish reason string.
finish_reason?: string;
}
/**
 * Parse one OpenAI Chat Completions streaming chunk into the unified shape.
 * Surfaces text/reasoning deltas, token usage, and the finish reason.
 */
export function parseCompletionsStreamChunk(chunk: any): UnifiedStreamChunk {
  const result: UnifiedStreamChunk = {};
  const choice = chunk.choices?.[0];
  const delta = choice?.delta;
  if (delta?.reasoning_content) {
    result.reasoning_content = delta.reasoning_content;
  }
  if (delta?.content) {
    result.content = delta.content;
  }
  const usage = chunk.usage;
  if (usage) {
    result.usage = {
      prompt_tokens: usage.prompt_tokens,
      completion_tokens: usage.completion_tokens,
      total_tokens: usage.total_tokens,
    };
  }
  if (choice?.finish_reason) {
    result.finish_reason = choice.finish_reason;
  }
  return result;
}
/**
 * Parse one OpenAI Responses API streaming event into the unified shape.
 *
 * The Responses API is event-driven: each chunk carries a `type` field
 * identifying the event.
 *
 * Fixes: the empty `response.output_item.added` branch (dead code) was
 * removed, and `response.reasoning_summary_text.delta` events are now
 * surfaced as reasoning_content instead of being dropped.
 */
export function parseResponsesStreamChunk(chunk: any): UnifiedStreamChunk {
  const result: UnifiedStreamChunk = {};
  // Reasoning-summary text stream → reasoning channel.
  if (chunk.type === 'response.reasoning_summary_text.delta' && chunk.delta) {
    result.reasoning_content = chunk.delta;
  }
  // Main answer text stream.
  if (chunk.type === 'response.output_text.delta' && chunk.delta) {
    result.content = chunk.delta;
  }
  // Terminal event carries token usage (input/output → prompt/completion).
  if (chunk.type === 'response.completed' && chunk.response?.usage) {
    result.usage = {
      prompt_tokens: chunk.response.usage.input_tokens,
      completion_tokens: chunk.response.usage.output_tokens,
      total_tokens: chunk.response.usage.total_tokens,
    };
  }
  return result;
}
/**
 * Parse one Anthropic Claude streaming event into the unified shape.
 *
 * Fix: extended-thinking `thinking_delta` events are now surfaced as
 * reasoning_content; previously they were silently dropped.
 */
export function parseClaudeStreamChunk(chunk: any): UnifiedStreamChunk {
  const result: UnifiedStreamChunk = {};
  // content_block_delta events carry incremental block content.
  if (chunk.type === 'content_block_delta') {
    // text_delta → answer text.
    if (chunk.delta?.type === 'text_delta' && chunk.delta?.text) {
      result.content = chunk.delta.text;
    }
    // thinking_delta → reasoning text (extended thinking).
    if (chunk.delta?.type === 'thinking_delta' && chunk.delta?.thinking) {
      result.reasoning_content = chunk.delta.thinking;
    }
  }
  // message_delta events carry usage and the stop reason.
  if (chunk.type === 'message_delta') {
    if (chunk.usage) {
      result.usage = {
        prompt_tokens: chunk.usage.input_tokens,
        completion_tokens: chunk.usage.output_tokens,
        // Claude does not report a total; derive it from the two counts.
        total_tokens: (chunk.usage.input_tokens || 0) + (chunk.usage.output_tokens || 0),
      };
    }
    if (chunk.delta?.stop_reason) {
      result.finish_reason = chunk.delta.stop_reason;
    }
  }
  return result;
}
/**
 * Parse one Google Gemini streaming chunk into the unified shape.
 *
 * Fix: iterate over every part of the candidate instead of only parts[0] —
 * a single chunk may interleave thought parts and answer parts, and the
 * later parts were previously dropped.
 */
export function parseGeminiStreamChunk(chunk: any): UnifiedStreamChunk {
  const result: UnifiedStreamChunk = {};
  const candidate = chunk.candidates?.[0];
  for (const part of candidate?.content?.parts ?? []) {
    if (!part?.text) {
      continue;
    }
    if (part.thought === true) {
      // Thought parts go to the reasoning channel.
      result.reasoning_content = (result.reasoning_content ?? '') + part.text;
    }
    else {
      // Everything else is answer text.
      result.content = (result.content ?? '') + part.text;
    }
  }
  if (chunk.usageMetadata) {
    result.usage = {
      prompt_tokens: chunk.usageMetadata.promptTokenCount,
      completion_tokens: chunk.usageMetadata.candidatesTokenCount,
      total_tokens: chunk.usageMetadata.totalTokenCount,
    };
  }
  if (candidate?.finishReason) {
    result.finish_reason = candidate.finishReason;
  }
  return result;
}
/**
 * Parse an SSE-formatted data block into an object.
 * SSE example:
 *   event: content_block_delta
 *   data: {"type":"content_block_delta","delta":{"text":"hello"}}
 *
 * Fix: the SSE spec makes the space after "data:" optional, so lines like
 * `data:{...}` are now parsed too (previously they were silently ignored).
 *
 * Pass-through behavior: objects, non-SSE strings, and "[DONE]" markers are
 * returned unchanged; unparseable JSON yields {} after logging.
 */
function parseSSEChunk(chunk: any): any {
  // Already-parsed objects pass through untouched.
  if (typeof chunk === 'object' && chunk !== null) {
    return chunk;
  }
  // Strings: extract the first data line and parse its JSON payload.
  if (typeof chunk === 'string') {
    let dataLine = '';
    for (const line of chunk.split('\n')) {
      if (line.startsWith('data:')) {
        // trim() removes the optional single space after the colon.
        dataLine = line.substring(5).trim();
        break;
      }
    }
    if (dataLine && dataLine !== '[DONE]') {
      try {
        return JSON.parse(dataLine);
      }
      catch (e) {
        console.error('解析 SSE data 失败:', e, '原始数据:', dataLine);
        return {};
      }
    }
  }
  return chunk;
}
/**
* 根据 API 格式类型解析流式响应数据块
* @param chunk 原始响应数据块
* @param apiType API 格式类型
* @returns 统一格式的响应数据
*/
export function parseStreamChunk(chunk: any, apiType: string): UnifiedStreamChunk {
// 先解析 SSE 格式(如果需要)
const parsedChunk = parseSSEChunk(chunk);
switch (apiType) {
case ApiFormatType.Completions:
return parseCompletionsStreamChunk(parsedChunk);
case ApiFormatType.Responses:
return parseResponsesStreamChunk(parsedChunk);
case ApiFormatType.Messages:
return parseClaudeStreamChunk(parsedChunk);
case ApiFormatType.GenerateContent:
return parseGeminiStreamChunk(parsedChunk);
default:
return parseCompletionsStreamChunk(parsedChunk);
}
}
/**
 * Build the provider-specific request body for a message list.
 *
 * Generalization: the Claude token cap, previously hard-coded to 32000, is
 * now an optional `maxTokens` parameter (default behavior unchanged). When
 * supplied, it is also forwarded to Gemini via generationConfig; when
 * omitted, the Gemini request body is identical to before.
 *
 * @param messages unified-format message list
 * @param apiType API format type (ApiFormatType value; unknown → Completions)
 * @param model model name (ignored by the Gemini body, which has no model field)
 * @param stream whether to request a streaming response
 * @param maxTokens optional output-token cap; Claude defaults to 32000
 * @returns the request body in the target format
 */
export function convertToApiFormat(
  messages: UnifiedMessage[],
  apiType: string,
  model: string,
  stream = true,
  maxTokens?: number,
): any {
  switch (apiType) {
    case ApiFormatType.Responses: {
      return {
        model,
        input: toResponsesFormat(messages),
        stream,
      };
    }
    case ApiFormatType.Messages: {
      const { messages: claudeMessages, system } = toClaudeFormat(messages);
      const request: any = {
        model,
        messages: claudeMessages,
        // Claude requires max_tokens; keep the historical default.
        max_tokens: maxTokens ?? 32000,
        stream,
      };
      if (system) {
        request.system = system;
      }
      return request;
    }
    case ApiFormatType.GenerateContent: {
      const request: any = {
        contents: toGeminiFormat(messages),
      };
      // Only add generationConfig when the caller asked for a cap,
      // so the default body stays byte-compatible with earlier behavior.
      if (maxTokens != null) {
        request.generationConfig = { maxOutputTokens: maxTokens };
      }
      return request;
    }
    // Completions is both a real case and the fallback for unknown types.
    case ApiFormatType.Completions:
    default: {
      return {
        model,
        messages: toCompletionsFormat(messages),
        stream,
      };
    }
  }
}