diff --git a/README.md b/README.md
index 975b7e47..72681afd 100644
--- a/README.md
+++ b/README.md
@@ -71,6 +71,21 @@ npm run dev
---
+## 🌐 Multi-Provider Support
+
+All Model Chat now supports multiple AI providers beyond Google Gemini:
+
+| Provider | Models | Context Window | Configuration |
+| :--- | :--- | :--- | :--- |
+| **Google Gemini** | Gemini 3.0, 2.5, Gemma | Up to 2M tokens | API Key or Environment Variable |
+| **MiniMax AI** | MiniMax-M2.7, MiniMax-M2.7-highspeed | 204K tokens | Settings > API Configuration |
+
+To use MiniMax models, enter your MiniMax API key in **Settings > API Configuration > MiniMax AI Configuration**, then select a MiniMax model from the model picker.
+
+> Get your MiniMax API key at [platform.minimax.chat](https://platform.minimax.chat)
+
+---
+
## 🛠️ 技术架构 / Technical Architecture
diff --git a/all-model-chat/components/settings/SettingsContent.tsx b/all-model-chat/components/settings/SettingsContent.tsx
index ee79dce7..a6a59fa5 100644
--- a/all-model-chat/components/settings/SettingsContent.tsx
+++ b/all-model-chat/components/settings/SettingsContent.tsx
@@ -114,6 +114,8 @@ export const SettingsContent: React.FC = ({
setApiProxyUrl={(val) => updateSetting('apiProxyUrl', val)}
useApiProxy={currentSettings.useApiProxy ?? false}
setUseApiProxy={(val) => updateSetting('useApiProxy', val)}
+ minimaxApiKey={currentSettings.minimaxApiKey ?? null}
+ setMinimaxApiKey={(val) => updateSetting('minimaxApiKey', val)}
availableModels={availableModels}
t={t as any}
/>
diff --git a/all-model-chat/components/settings/sections/ApiConfigSection.tsx b/all-model-chat/components/settings/sections/ApiConfigSection.tsx
index 10fc4ad6..1974534f 100644
--- a/all-model-chat/components/settings/sections/ApiConfigSection.tsx
+++ b/all-model-chat/components/settings/sections/ApiConfigSection.tsx
@@ -1,6 +1,6 @@
import React, { useState, useEffect } from 'react';
-import { KeyRound } from 'lucide-react';
+import { KeyRound, Sparkles } from 'lucide-react';
import { useResponsiveValue } from '../../../hooks/useDevice';
import { getClient } from '../../../services/api/baseApi';
import { parseApiKeys } from '../../../utils/apiUtils';
@@ -9,6 +9,7 @@ import { ApiKeyInput } from './api-config/ApiKeyInput';
import { ApiProxySettings } from './api-config/ApiProxySettings';
import { ApiConnectionTester } from './api-config/ApiConnectionTester';
import { ModelOption } from '../../../types';
+import { SETTINGS_INPUT_CLASS } from '../../../constants/appConstants';
interface ApiConfigSectionProps {
useCustomApiConfig: boolean;
@@ -19,6 +20,8 @@ interface ApiConfigSectionProps {
setApiProxyUrl: (value: string | null) => void;
useApiProxy: boolean;
setUseApiProxy: (value: boolean) => void;
+ minimaxApiKey: string | null;
+ setMinimaxApiKey: (value: string | null) => void;
availableModels: ModelOption[];
t: (key: string) => string;
}
@@ -42,6 +45,8 @@ export const ApiConfigSection: React.FC = ({
setApiProxyUrl,
useApiProxy,
setUseApiProxy,
+ minimaxApiKey,
+ setMinimaxApiKey,
availableModels,
t,
}) => {
@@ -170,6 +175,30 @@ export const ApiConfigSection: React.FC = ({
+
+ {/* MiniMax API Configuration */}
+
+
+
+ {t('settingsMiniMaxConfig')}
+
+
+ {t('settingsMiniMaxHelp')}
+
+
+ setMinimaxApiKey(e.target.value || null)}
+ placeholder={t('settingsMiniMaxKeyPlaceholder')}
+ className={`w-full px-4 py-2.5 rounded-lg text-sm border ${SETTINGS_INPUT_CLASS} focus:outline-none focus:ring-2 transition-colors duration-200`}
+ autoComplete="off"
+ />
+
+
+ {t('settingsMiniMaxModelsInfo')}
+
+
);
};
diff --git a/all-model-chat/constants/appConstants.ts b/all-model-chat/constants/appConstants.ts
index 80c3da6a..51ac4bf3 100644
--- a/all-model-chat/constants/appConstants.ts
+++ b/all-model-chat/constants/appConstants.ts
@@ -86,6 +86,7 @@ export const DEFAULT_APP_SETTINGS: AppSettings = {
apiKey: null,
apiProxyUrl: "https://api-proxy.de/gemini/v1beta",
useApiProxy: false,
+ minimaxApiKey: null,
language: 'system',
isStreamingEnabled: DEFAULT_IS_STREAMING_ENABLED,
transcriptionModelId: DEFAULT_TRANSCRIPTION_MODEL_ID,
diff --git a/all-model-chat/constants/providerConstants.ts b/all-model-chat/constants/providerConstants.ts
new file mode 100644
index 00000000..a0320a6f
--- /dev/null
+++ b/all-model-chat/constants/providerConstants.ts
@@ -0,0 +1,49 @@
+
+import { ModelOption } from '../types';
+
+/**
+ * Provider type identifiers.
+ * 'gemini' uses the @google/genai SDK directly.
+ * 'openai-compatible' uses the OpenAI-compatible /v1/chat/completions endpoint.
+ */
+export type ProviderType = 'gemini' | 'openai-compatible';
+
+export interface ProviderPreset {
+ name: string;
+ type: ProviderType;
+ baseUrl: string;
+ models: ModelOption[];
+ /** Model ID prefix used for detection. */
+ prefix: string;
+}
+
+export const MINIMAX_BASE_URL = 'https://api.minimax.io/v1';
+
+export const MINIMAX_MODELS: ModelOption[] = [
+ { id: 'MiniMax-M2.7', name: 'MiniMax M2.7', isPinned: true },
+ { id: 'MiniMax-M2.7-highspeed', name: 'MiniMax M2.7 Highspeed', isPinned: true },
+];
+
+/**
+ * Check if a model ID belongs to MiniMax.
+ */
+export const isMiniMaxModel = (modelId: string): boolean => {
+ return modelId.startsWith('MiniMax-');
+};
+
+/**
+ * Check if a model uses the OpenAI-compatible API path.
+ */
+export const isOpenAICompatModel = (modelId: string): boolean => {
+ return isMiniMaxModel(modelId);
+};
+
+/**
+ * Get the base URL for an OpenAI-compatible model.
+ */
+export const getOpenAICompatBaseUrl = (modelId: string): string => {
+ if (isMiniMaxModel(modelId)) {
+ return MINIMAX_BASE_URL;
+ }
+ return '';
+};
diff --git a/all-model-chat/hooks/message-sender/standard/useApiInteraction.ts b/all-model-chat/hooks/message-sender/standard/useApiInteraction.ts
index 5b9cc635..f6bfec32 100644
--- a/all-model-chat/hooks/message-sender/standard/useApiInteraction.ts
+++ b/all-model-chat/hooks/message-sender/standard/useApiInteraction.ts
@@ -2,8 +2,10 @@
import React, { useCallback, Dispatch, SetStateAction } from 'react';
import { AppSettings, ChatMessage, ChatSettings as IndividualChatSettings, UploadedFile } from '../../../types';
import { createChatHistoryForApi, isGemini3Model, logService } from '../../../utils/appUtils';
+import { isOpenAICompatModel } from '../../../utils/modelHelpers';
import { buildGenerationConfig } from '../../../services/api/baseApi';
import { geminiServiceInstance } from '../../../services/geminiService';
+import { sendOpenAICompatMessageStream, sendOpenAICompatMessageNonStream } from '../../../services/api/openaiCompatApi';
import { pyodideService } from '../../../services/pyodideService';
import { isLikelyHtml } from '../../../utils/codeUtils';
import { GetStreamHandlers } from '../types';
@@ -112,24 +114,6 @@ export const useApiInteraction = ({
const shouldStripThinking = sessionToUpdate.hideThinkingInContext ?? appSettings.hideThinkingInContext;
const historyForChat = await createChatHistoryForApi(baseMessagesForApi, shouldStripThinking);
- const config = buildGenerationConfig(
- activeModelId,
- sessionToUpdate.systemInstruction,
- { temperature: sessionToUpdate.temperature, topP: sessionToUpdate.topP },
- sessionToUpdate.showThoughts,
- sessionToUpdate.thinkingBudget,
- !!sessionToUpdate.isGoogleSearchEnabled,
- !!sessionToUpdate.isCodeExecutionEnabled,
- !!sessionToUpdate.isUrlContextEnabled,
- sessionToUpdate.thinkingLevel,
- aspectRatio,
- sessionToUpdate.isDeepSearchEnabled,
- imageSize,
- sessionToUpdate.safetySettings,
- sessionToUpdate.mediaResolution,
- !!sessionToUpdate.isLocalPythonEnabled
- );
-
const { streamOnError, streamOnComplete, streamOnPart, onThoughtChunk } = getStreamHandlers(
finalSessionId,
generationId,
@@ -149,6 +133,71 @@ export const useApiInteraction = ({
setSessionLoading(finalSessionId, true);
activeJobs.current.set(generationId, newAbortController);
+ // Route through OpenAI-compatible API for MiniMax and other non-Gemini providers
+ if (isOpenAICompatModel(activeModelId)) {
+ const minimaxKey = appSettings.minimaxApiKey;
+ if (!minimaxKey) {
+ streamOnError(new Error('MiniMax API Key is not configured. Please set it in Settings > API Configuration.'));
+ return;
+ }
+
+ const openaiConfig = {
+ temperature: sessionToUpdate.temperature,
+ topP: sessionToUpdate.topP,
+ systemInstruction: sessionToUpdate.systemInstruction,
+ };
+
+ if (appSettings.isStreamingEnabled) {
+ await sendOpenAICompatMessageStream(
+ minimaxKey,
+ activeModelId,
+ historyForChat,
+ finalParts,
+ openaiConfig,
+ newAbortController.signal,
+ streamOnPart,
+ onThoughtChunk,
+ streamOnError,
+ streamOnComplete,
+ finalRole
+ );
+ } else {
+ await sendOpenAICompatMessageNonStream(
+ minimaxKey,
+ activeModelId,
+ historyForChat,
+ finalParts,
+ openaiConfig,
+ newAbortController.signal,
+ streamOnError,
+ (parts, thoughts, usage, grounding) => {
+ for (const part of parts) streamOnPart(part);
+ if (thoughts) onThoughtChunk(thoughts);
+ streamOnComplete(usage, grounding);
+ }
+ );
+ }
+ return;
+ }
+
+ const config = buildGenerationConfig(
+ activeModelId,
+ sessionToUpdate.systemInstruction,
+ { temperature: sessionToUpdate.temperature, topP: sessionToUpdate.topP },
+ sessionToUpdate.showThoughts,
+ sessionToUpdate.thinkingBudget,
+ !!sessionToUpdate.isGoogleSearchEnabled,
+ !!sessionToUpdate.isCodeExecutionEnabled,
+ !!sessionToUpdate.isUrlContextEnabled,
+ sessionToUpdate.thinkingLevel,
+ aspectRatio,
+ sessionToUpdate.isDeepSearchEnabled,
+ imageSize,
+ sessionToUpdate.safetySettings,
+ sessionToUpdate.mediaResolution,
+ !!sessionToUpdate.isLocalPythonEnabled
+ );
+
if (appSettings.isStreamingEnabled) {
await geminiServiceInstance.sendMessageStream(
keyToUse,
diff --git a/all-model-chat/hooks/message-sender/useStandardChat.ts b/all-model-chat/hooks/message-sender/useStandardChat.ts
index 273f08d6..9f9e7574 100644
--- a/all-model-chat/hooks/message-sender/useStandardChat.ts
+++ b/all-model-chat/hooks/message-sender/useStandardChat.ts
@@ -1,6 +1,7 @@
import { useCallback } from 'react';
import { generateUniqueId, buildContentParts, getKeyForRequest, performOptimisticSessionUpdate, logService } from '../../utils/appUtils';
+import { isOpenAICompatModel } from '../../utils/modelHelpers';
import { DEFAULT_CHAT_SETTINGS, MODELS_SUPPORTING_RAW_MODE } from '../../constants/appConstants';
import { UploadedFile, ChatMessage } from '../../types';
import { StandardChatProps } from './types';
@@ -66,25 +67,37 @@ export const useStandardChat = ({
logService.info(`Fast Mode activated (One-off): Overriding thinking level to ${targetLevel}.`);
}
- const keyResult = getKeyForRequest(appSettings, settingsForApi);
- if ('error' in keyResult) {
- logService.error("Send message failed: API Key not configured.");
- const errorMsg: ChatMessage = { id: generateUniqueId(), role: 'error', content: keyResult.error, timestamp: new Date() };
- const newSessionId = generateUniqueId();
-
- updateAndPersistSessions(prev => performOptimisticSessionUpdate(prev, {
- activeSessionId: null,
- newSessionId,
- newMessages: [errorMsg],
- settings: { ...DEFAULT_CHAT_SETTINGS, ...appSettings },
- appSettings,
- title: "API Key Error"
- }));
- setActiveSessionId(newSessionId);
- return;
+ // For OpenAI-compatible providers (MiniMax), bypass Gemini key check
+ let keyToUse: string;
+ let isNewKey = false;
+ let shouldLockKey = false;
+
+ if (isOpenAICompatModel(activeModelId)) {
+ // MiniMax models use their own API key, managed in useApiInteraction
+ keyToUse = 'openai-compat-placeholder';
+ isNewKey = false;
+ } else {
+ const keyResult = getKeyForRequest(appSettings, settingsForApi);
+ if ('error' in keyResult) {
+ logService.error("Send message failed: API Key not configured.");
+ const errorMsg: ChatMessage = { id: generateUniqueId(), role: 'error', content: keyResult.error, timestamp: new Date() };
+ const newSessionId = generateUniqueId();
+
+ updateAndPersistSessions(prev => performOptimisticSessionUpdate(prev, {
+ activeSessionId: null,
+ newSessionId,
+ newMessages: [errorMsg],
+ settings: { ...DEFAULT_CHAT_SETTINGS, ...appSettings },
+ appSettings,
+ title: "API Key Error"
+ }));
+ setActiveSessionId(newSessionId);
+ return;
+ }
+ keyToUse = keyResult.key;
+ isNewKey = keyResult.isNewKey;
+ shouldLockKey = isNewKey && filesToUse.some(f => f.fileUri && f.uploadState === 'active');
}
- const { key: keyToUse, isNewKey } = keyResult;
- const shouldLockKey = isNewKey && filesToUse.some(f => f.fileUri && f.uploadState === 'active');
const newAbortController = new AbortController();
diff --git a/all-model-chat/package.json b/all-model-chat/package.json
index 9fa98dc8..c245a5db 100644
--- a/all-model-chat/package.json
+++ b/all-model-chat/package.json
@@ -6,7 +6,9 @@
"scripts": {
"dev": "vite",
"build": "vite build",
- "preview": "vite preview"
+ "preview": "vite preview",
+ "test": "vitest run",
+ "test:watch": "vitest"
},
"dependencies": {
"@formkit/auto-animate": "^0.8.2",
@@ -39,6 +41,8 @@
"xlsx": "^0.18.5"
},
"devDependencies": {
+ "@testing-library/jest-dom": "^6.9.1",
+ "@testing-library/react": "^16.3.2",
"@types/dompurify": "^3.0.5",
"@types/katex": "^0.16.7",
"@types/node": "^20.14.10",
@@ -47,10 +51,12 @@
"@types/turndown": "^5.0.5",
"@vitejs/plugin-react": "^4.3.1",
"autoprefixer": "^10.4.19",
+ "jsdom": "^29.0.1",
"postcss": "^8.4.39",
"tailwindcss": "^3.4.4",
"typescript": "^5.5.3",
"vite": "^5.3.3",
- "vite-plugin-static-copy": "^1.0.0"
+ "vite-plugin-static-copy": "^1.0.0",
+ "vitest": "^4.1.2"
}
}
diff --git a/all-model-chat/services/api/openaiCompatApi.ts b/all-model-chat/services/api/openaiCompatApi.ts
new file mode 100644
index 00000000..75be1a6a
--- /dev/null
+++ b/all-model-chat/services/api/openaiCompatApi.ts
@@ -0,0 +1,347 @@
+
+import { Part, UsageMetadata, ChatHistoryItem } from "@google/genai";
+import { logService } from "../logService";
+import { getOpenAICompatBaseUrl } from "../../constants/providerConstants";
+
+/**
+ * OpenAI-compatible chat API using native fetch().
+ * Supports streaming (SSE) and non-streaming modes.
+ * Used for MiniMax and other OpenAI-compatible providers.
+ */
+
+interface OpenAIMessage {
+ role: 'system' | 'user' | 'assistant';
+ content: string;
+}
+
+interface OpenAIStreamChoice {
+ delta?: { content?: string; role?: string };
+ finish_reason?: string | null;
+ index: number;
+}
+
+interface OpenAIStreamChunk {
+ id: string;
+ object: string;
+ created: number;
+ model: string;
+ choices: OpenAIStreamChoice[];
+ usage?: {
+ prompt_tokens?: number;
+ completion_tokens?: number;
+ total_tokens?: number;
+ };
+}
+
+interface OpenAIResponse {
+ id: string;
+ object: string;
+ created: number;
+ model: string;
+ choices: {
+ message: { role: string; content: string };
+ finish_reason: string;
+ index: number;
+ }[];
+ usage?: {
+ prompt_tokens?: number;
+ completion_tokens?: number;
+ total_tokens?: number;
+ };
+}
+
+/**
+ * Convert Gemini-format chat history to OpenAI messages format.
+ */
+export const convertHistoryToOpenAIMessages = (
+ history: ChatHistoryItem[],
+ parts: Part[],
+ systemInstruction?: string,
+ role: 'user' | 'model' = 'user'
+): OpenAIMessage[] => {
+ const messages: OpenAIMessage[] = [];
+
+ // Add system instruction if provided
+ if (systemInstruction) {
+ messages.push({ role: 'system', content: systemInstruction });
+ }
+
+ // Convert history
+ for (const item of history) {
+ const textParts: string[] = [];
+ for (const part of item.parts) {
+ if (part.text) {
+ textParts.push(part.text);
+ } else if ((part as any).inlineData) {
+ textParts.push('[Attachment: media content]');
+ } else if ((part as any).fileData) {
+ textParts.push('[Attachment: file reference]');
+ }
+ }
+ const content = textParts.join('\n');
+ if (content) {
+ messages.push({
+ role: item.role === 'model' ? 'assistant' : 'user',
+ content,
+ });
+ }
+ }
+
+ // Add current message parts
+ const currentTextParts: string[] = [];
+ for (const part of parts) {
+ if (part.text) {
+ currentTextParts.push(part.text);
+ }
+ }
+ const currentContent = currentTextParts.join('\n');
+ if (currentContent) {
+ messages.push({
+ role: role === 'model' ? 'assistant' : 'user',
+ content: currentContent,
+ });
+ }
+
+ return messages;
+};
+
+/**
+ * Clamp temperature for MiniMax: must be in (0.0, 1.0].
+ */
+const clampTemperature = (temp?: number): number => {
+ if (temp === undefined || temp === null) return 0.7;
+ if (temp <= 0) return 0.01;
+ if (temp > 1.0) return 1.0;
+ return temp;
+};
+
+/**
+ * Strip ... tags from MiniMax M2.7 response content.
+ * M2.7 may include thinking blocks in the response.
+ */
+const stripThinkingTags = (content: string): { text: string; thoughts: string } => {
+ const thinkingRegex = /([\s\S]*?)<\/thinking>/gi;
+ let thoughts = '';
+ let match: RegExpExecArray | null;
+
+ while ((match = thinkingRegex.exec(content)) !== null) {
+ thoughts += match[1];
+ }
+
+ const text = content.replace(thinkingRegex, '').trim();
+ return { text, thoughts };
+};
+
+/**
+ * Send a streaming chat message via OpenAI-compatible API.
+ */
+export const sendOpenAICompatMessageStream = async (
+ apiKey: string,
+ modelId: string,
+ history: ChatHistoryItem[],
+ parts: Part[],
+ config: { temperature?: number; topP?: number; systemInstruction?: string },
+ abortSignal: AbortSignal,
+ onPart: (part: Part) => void,
+ onThoughtChunk: (chunk: string) => void,
+ onError: (error: Error) => void,
+ onComplete: (usageMetadata?: UsageMetadata, groundingMetadata?: any, urlContextMetadata?: any) => void,
+ role: 'user' | 'model' = 'user'
+): Promise => {
+ const baseUrl = getOpenAICompatBaseUrl(modelId);
+ logService.info(`[OpenAI-Compat] Sending streaming message for ${modelId} to ${baseUrl}`);
+
+ let finalUsage: UsageMetadata | undefined;
+ let accumulatedContent = '';
+
+ try {
+ const messages = convertHistoryToOpenAIMessages(history, parts, config.systemInstruction, role);
+
+ const requestBody: any = {
+ model: modelId,
+ messages,
+ stream: true,
+ temperature: clampTemperature(config.temperature),
+ };
+
+ if (config.topP !== undefined && config.topP !== null) {
+ requestBody.top_p = config.topP;
+ }
+
+ const response = await fetch(`${baseUrl}/chat/completions`, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'Authorization': `Bearer ${apiKey}`,
+ },
+ body: JSON.stringify(requestBody),
+ signal: abortSignal,
+ });
+
+ if (!response.ok) {
+ const errorText = await response.text();
+ let errorMessage = `API error ${response.status}`;
+ try {
+ const errorJson = JSON.parse(errorText);
+ errorMessage = errorJson.error?.message || errorJson.message || errorMessage;
+ } catch {
+ errorMessage = errorText || errorMessage;
+ }
+ throw new Error(errorMessage);
+ }
+
+ const reader = response.body?.getReader();
+ if (!reader) {
+ throw new Error('Response body is not readable');
+ }
+
+ const decoder = new TextDecoder();
+ let buffer = '';
+
+ while (true) {
+ if (abortSignal.aborted) {
+ logService.warn('[OpenAI-Compat] Streaming aborted by signal.');
+ reader.cancel();
+ break;
+ }
+
+ const { done, value } = await reader.read();
+ if (done) break;
+
+ buffer += decoder.decode(value, { stream: true });
+ const lines = buffer.split('\n');
+ buffer = lines.pop() || '';
+
+ for (const line of lines) {
+ const trimmed = line.trim();
+ if (!trimmed || !trimmed.startsWith('data: ')) continue;
+
+ const data = trimmed.slice(6);
+ if (data === '[DONE]') continue;
+
+ try {
+ const chunk: OpenAIStreamChunk = JSON.parse(data);
+ const choice = chunk.choices?.[0];
+
+ if (choice?.delta?.content) {
+ accumulatedContent += choice.delta.content;
+ onPart({ text: choice.delta.content });
+ }
+
+ if (chunk.usage) {
+ finalUsage = {
+ promptTokenCount: chunk.usage.prompt_tokens || 0,
+ candidatesTokenCount: chunk.usage.completion_tokens || 0,
+ totalTokenCount: chunk.usage.total_tokens || 0,
+ } as UsageMetadata;
+ }
+ } catch (parseError) {
+ logService.debug('[OpenAI-Compat] Failed to parse SSE chunk:', parseError);
+ }
+ }
+ }
+
+ // Post-process: strip thinking tags from accumulated content
+ if (accumulatedContent) {
+ const { thoughts } = stripThinkingTags(accumulatedContent);
+ if (thoughts) {
+ onThoughtChunk(thoughts);
+ }
+ }
+
+ } catch (error) {
+ if (abortSignal.aborted) {
+ logService.warn('[OpenAI-Compat] Request was aborted.');
+ } else {
+ logService.error('[OpenAI-Compat] Error sending streaming message:', error);
+ onError(error instanceof Error ? error : new Error(String(error)));
+ }
+ } finally {
+ logService.info('[OpenAI-Compat] Streaming complete.', { usage: finalUsage });
+ onComplete(finalUsage, undefined, undefined);
+ }
+};
+
+/**
+ * Send a non-streaming chat message via OpenAI-compatible API.
+ */
+export const sendOpenAICompatMessageNonStream = async (
+ apiKey: string,
+ modelId: string,
+ history: ChatHistoryItem[],
+ parts: Part[],
+ config: { temperature?: number; topP?: number; systemInstruction?: string },
+ abortSignal: AbortSignal,
+ onError: (error: Error) => void,
+ onComplete: (parts: Part[], thoughtsText?: string, usageMetadata?: UsageMetadata, groundingMetadata?: any, urlContextMetadata?: any) => void
+): Promise => {
+ const baseUrl = getOpenAICompatBaseUrl(modelId);
+ logService.info(`[OpenAI-Compat] Sending non-streaming message for ${modelId} to ${baseUrl}`);
+
+ try {
+ const messages = convertHistoryToOpenAIMessages(history, parts, config.systemInstruction);
+
+ const requestBody: any = {
+ model: modelId,
+ messages,
+ stream: false,
+ temperature: clampTemperature(config.temperature),
+ };
+
+ if (config.topP !== undefined && config.topP !== null) {
+ requestBody.top_p = config.topP;
+ }
+
+ const response = await fetch(`${baseUrl}/chat/completions`, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'Authorization': `Bearer ${apiKey}`,
+ },
+ body: JSON.stringify(requestBody),
+ signal: abortSignal,
+ });
+
+ if (!response.ok) {
+ const errorText = await response.text();
+ let errorMessage = `API error ${response.status}`;
+ try {
+ const errorJson = JSON.parse(errorText);
+ errorMessage = errorJson.error?.message || errorJson.message || errorMessage;
+ } catch {
+ errorMessage = errorText || errorMessage;
+ }
+ throw new Error(errorMessage);
+ }
+
+ const data: OpenAIResponse = await response.json();
+
+ if (abortSignal.aborted) {
+ onComplete([], '', undefined, undefined, undefined);
+ return;
+ }
+
+ const rawContent = data.choices?.[0]?.message?.content || '';
+ const { text, thoughts } = stripThinkingTags(rawContent);
+
+ const responseParts: Part[] = text ? [{ text }] : [];
+
+ const usage: UsageMetadata | undefined = data.usage ? {
+ promptTokenCount: data.usage.prompt_tokens || 0,
+ candidatesTokenCount: data.usage.completion_tokens || 0,
+ totalTokenCount: data.usage.total_tokens || 0,
+ } as UsageMetadata : undefined;
+
+ logService.info(`[OpenAI-Compat] Non-stream complete for ${modelId}.`, { usage });
+ onComplete(responseParts, thoughts || undefined, usage, undefined, undefined);
+
+ } catch (error) {
+ if (abortSignal.aborted) {
+ logService.warn('[OpenAI-Compat] Request was aborted.');
+ onComplete([], '', undefined, undefined, undefined);
+ } else {
+ logService.error('[OpenAI-Compat] Error sending non-streaming message:', error);
+ onError(error instanceof Error ? error : new Error(String(error)));
+ }
+ }
+};
diff --git a/all-model-chat/tests/openaiCompatApi.test.ts b/all-model-chat/tests/openaiCompatApi.test.ts
new file mode 100644
index 00000000..fc8548b7
--- /dev/null
+++ b/all-model-chat/tests/openaiCompatApi.test.ts
@@ -0,0 +1,466 @@
+import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
+import { convertHistoryToOpenAIMessages } from '../services/api/openaiCompatApi';
+
+// Mock logService since it depends on browser APIs
+vi.mock('../services/logService', () => ({
+ logService: {
+ info: vi.fn(),
+ warn: vi.fn(),
+ error: vi.fn(),
+ debug: vi.fn(),
+ },
+}));
+
+describe('openaiCompatApi', () => {
+ describe('convertHistoryToOpenAIMessages', () => {
+ it('should convert empty history with text parts', () => {
+ const messages = convertHistoryToOpenAIMessages(
+ [],
+ [{ text: 'Hello, world!' }],
+ );
+ expect(messages).toEqual([
+ { role: 'user', content: 'Hello, world!' },
+ ]);
+ });
+
+ it('should add system instruction when provided', () => {
+ const messages = convertHistoryToOpenAIMessages(
+ [],
+ [{ text: 'Hi' }],
+ 'You are a helpful assistant.',
+ );
+ expect(messages).toEqual([
+ { role: 'system', content: 'You are a helpful assistant.' },
+ { role: 'user', content: 'Hi' },
+ ]);
+ });
+
+ it('should convert model role to assistant', () => {
+ const history = [
+ { role: 'user' as const, parts: [{ text: 'Hello' }] },
+ { role: 'model' as const, parts: [{ text: 'Hi there!' }] },
+ ];
+ const messages = convertHistoryToOpenAIMessages(
+ history,
+ [{ text: 'How are you?' }],
+ );
+ expect(messages).toEqual([
+ { role: 'user', content: 'Hello' },
+ { role: 'assistant', content: 'Hi there!' },
+ { role: 'user', content: 'How are you?' },
+ ]);
+ });
+
+ it('should handle multi-part messages by joining with newline', () => {
+ const history = [
+ { role: 'user' as const, parts: [{ text: 'Part 1' }, { text: 'Part 2' }] },
+ ];
+ const messages = convertHistoryToOpenAIMessages(history, [{ text: 'Question' }]);
+ expect(messages[0].content).toBe('Part 1\nPart 2');
+ });
+
+ it('should handle inline data parts as attachment placeholder', () => {
+ const history = [
+ {
+ role: 'user' as const,
+ parts: [
+ { text: 'Check this image' },
+ { inlineData: { mimeType: 'image/png', data: 'base64...' } } as any,
+ ],
+ },
+ ];
+ const messages = convertHistoryToOpenAIMessages(history, [{ text: 'What is it?' }]);
+ expect(messages[0].content).toContain('Check this image');
+ expect(messages[0].content).toContain('[Attachment: media content]');
+ });
+
+ it('should handle fileData parts as attachment placeholder', () => {
+ const history = [
+ {
+ role: 'user' as const,
+ parts: [
+ { fileData: { mimeType: 'application/pdf', fileUri: 'gs://...' } } as any,
+ ],
+ },
+ ];
+ const messages = convertHistoryToOpenAIMessages(history, [{ text: 'Summarize' }]);
+ expect(messages[0].content).toContain('[Attachment: file reference]');
+ });
+
+ it('should respect role parameter for current message', () => {
+ const messages = convertHistoryToOpenAIMessages(
+ [],
+ [{ text: 'Continue the story' }],
+ undefined,
+ 'model',
+ );
+ expect(messages[0].role).toBe('assistant');
+ });
+
+ it('should skip empty history items', () => {
+ const history = [
+ { role: 'user' as const, parts: [] },
+ { role: 'model' as const, parts: [{ text: 'Response' }] },
+ ];
+ const messages = convertHistoryToOpenAIMessages(history, [{ text: 'Next' }]);
+ // Empty user message should be skipped
+ expect(messages.length).toBe(2);
+ expect(messages[0].role).toBe('assistant');
+ });
+
+ it('should handle complex conversation with system instruction', () => {
+ const history = [
+ { role: 'user' as const, parts: [{ text: 'Tell me a joke' }] },
+ { role: 'model' as const, parts: [{ text: 'Why did the chicken...' }] },
+ { role: 'user' as const, parts: [{ text: 'Another one' }] },
+ { role: 'model' as const, parts: [{ text: 'Knock knock...' }] },
+ ];
+ const messages = convertHistoryToOpenAIMessages(
+ history,
+ [{ text: 'One more please' }],
+ 'You are a comedian.',
+ );
+ expect(messages.length).toBe(6); // system + 4 history + 1 current
+ expect(messages[0].role).toBe('system');
+ expect(messages[1].role).toBe('user');
+ expect(messages[2].role).toBe('assistant');
+ expect(messages[3].role).toBe('user');
+ expect(messages[4].role).toBe('assistant');
+ expect(messages[5].role).toBe('user');
+ });
+ });
+
+ describe('temperature clamping (via internal clampTemperature)', () => {
+ // We can't test clampTemperature directly as it's not exported,
+ // but we test it indirectly through the request body construction.
+ // These tests verify the behavior through the convertHistoryToOpenAIMessages function
+ // which is a prerequisite for the actual API call.
+
+ it('should handle undefined system instruction gracefully', () => {
+ const messages = convertHistoryToOpenAIMessages(
+ [],
+ [{ text: 'Hello' }],
+ undefined,
+ );
+ expect(messages.length).toBe(1);
+ expect(messages[0].role).toBe('user');
+ });
+
+ it('should handle empty system instruction', () => {
+ const messages = convertHistoryToOpenAIMessages(
+ [],
+ [{ text: 'Hello' }],
+ '',
+ );
+ // Empty string is falsy, should not add system message
+ expect(messages.length).toBe(1);
+ });
+ });
+});
+
+describe('openaiCompatApi - streaming integration', () => {
+ beforeEach(() => {
+ vi.stubGlobal('fetch', vi.fn());
+ });
+
+ afterEach(() => {
+ vi.restoreAllMocks();
+ });
+
+ it('should handle API error responses gracefully', async () => {
+ const { sendOpenAICompatMessageStream } = await import('../services/api/openaiCompatApi');
+
+ const mockResponse = new Response(JSON.stringify({ error: { message: 'Invalid API key' } }), {
+ status: 401,
+ headers: { 'Content-Type': 'application/json' },
+ });
+ vi.mocked(fetch).mockResolvedValue(mockResponse);
+
+ const onError = vi.fn();
+ const onComplete = vi.fn();
+ const onPart = vi.fn();
+ const onThought = vi.fn();
+
+ await sendOpenAICompatMessageStream(
+ 'invalid-key',
+ 'MiniMax-M2.7',
+ [],
+ [{ text: 'Hello' }],
+ { temperature: 0.7 },
+ new AbortController().signal,
+ onPart,
+ onThought,
+ onError,
+ onComplete,
+ );
+
+ expect(onError).toHaveBeenCalledWith(expect.objectContaining({
+ message: expect.stringContaining('Invalid API key'),
+ }));
+ expect(onComplete).toHaveBeenCalled();
+ });
+
+ it('should send correct request to MiniMax endpoint', async () => {
+ const { sendOpenAICompatMessageNonStream } = await import('../services/api/openaiCompatApi');
+
+ const mockResponse = new Response(JSON.stringify({
+ id: 'test-id',
+ object: 'chat.completion',
+ choices: [{ message: { role: 'assistant', content: 'Hello!' }, finish_reason: 'stop', index: 0 }],
+ usage: { prompt_tokens: 5, completion_tokens: 3, total_tokens: 8 },
+ }), { status: 200, headers: { 'Content-Type': 'application/json' } });
+ vi.mocked(fetch).mockResolvedValue(mockResponse);
+
+ const onError = vi.fn();
+ const onComplete = vi.fn();
+
+ await sendOpenAICompatMessageNonStream(
+ 'test-api-key',
+ 'MiniMax-M2.7',
+ [],
+ [{ text: 'Hello' }],
+ { temperature: 0.7, systemInstruction: 'Be helpful' },
+ new AbortController().signal,
+ onError,
+ onComplete,
+ );
+
+ // Verify fetch was called with correct URL
+ expect(fetch).toHaveBeenCalledWith(
+ 'https://api.minimax.io/v1/chat/completions',
+ expect.objectContaining({
+ method: 'POST',
+ headers: expect.objectContaining({
+ 'Authorization': 'Bearer test-api-key',
+ 'Content-Type': 'application/json',
+ }),
+ }),
+ );
+
+ // Verify the request body
+ const callArgs = vi.mocked(fetch).mock.calls[0];
+ const body = JSON.parse(callArgs[1]?.body as string);
+ expect(body.model).toBe('MiniMax-M2.7');
+ expect(body.stream).toBe(false);
+ expect(body.temperature).toBe(0.7);
+ expect(body.messages).toEqual([
+ { role: 'system', content: 'Be helpful' },
+ { role: 'user', content: 'Hello' },
+ ]);
+
+ // Verify completion was called with proper data
+ expect(onError).not.toHaveBeenCalled();
+ expect(onComplete).toHaveBeenCalledWith(
+ [{ text: 'Hello!' }],
+ undefined,
+ expect.objectContaining({
+ promptTokenCount: 5,
+ candidatesTokenCount: 3,
+ totalTokenCount: 8,
+ }),
+ undefined,
+ undefined,
+ );
+ });
+
+ it('should strip thinking tags from response', async () => {
+ const { sendOpenAICompatMessageNonStream } = await import('../services/api/openaiCompatApi');
+
+ const content = 'Let me think about this... The answer is 42.';
+ const mockResponse = new Response(JSON.stringify({
+ id: 'test-id',
+ object: 'chat.completion',
+ choices: [{ message: { role: 'assistant', content }, finish_reason: 'stop', index: 0 }],
+ }), { status: 200, headers: { 'Content-Type': 'application/json' } });
+ vi.mocked(fetch).mockResolvedValue(mockResponse);
+
+ const onComplete = vi.fn();
+
+ await sendOpenAICompatMessageNonStream(
+ 'test-key',
+ 'MiniMax-M2.7',
+ [],
+ [{ text: 'Question' }],
+ { temperature: 0.7 },
+ new AbortController().signal,
+ vi.fn(),
+ onComplete,
+ );
+
+ // Should strip thinking tags from main content
+ expect(onComplete).toHaveBeenCalledWith(
+ [{ text: 'The answer is 42.' }],
+ 'Let me think about this...',
+ undefined,
+ undefined,
+ undefined,
+ );
+ });
+
+ it('should clamp temperature to (0, 1] range', async () => {
+ const { sendOpenAICompatMessageNonStream } = await import('../services/api/openaiCompatApi');
+
+ // Test with temperature 0 (should be clamped to 0.01)
+ const mockResponse1 = new Response(JSON.stringify({
+ id: 'test-id',
+ object: 'chat.completion',
+ choices: [{ message: { role: 'assistant', content: 'ok' }, finish_reason: 'stop', index: 0 }],
+ }), { status: 200, headers: { 'Content-Type': 'application/json' } });
+ vi.mocked(fetch).mockResolvedValue(mockResponse1);
+
+ await sendOpenAICompatMessageNonStream(
+ 'test-key', 'MiniMax-M2.7', [], [{ text: 'test' }],
+ { temperature: 0 },
+ new AbortController().signal, vi.fn(), vi.fn(),
+ );
+
+ let body = JSON.parse(vi.mocked(fetch).mock.calls[0][1]?.body as string);
+ expect(body.temperature).toBe(0.01);
+
+ vi.mocked(fetch).mockClear();
+
+ // Test with temperature > 1 (should be clamped to 1.0)
+ const mockResponse2 = new Response(JSON.stringify({
+ id: 'test-id-2',
+ object: 'chat.completion',
+ choices: [{ message: { role: 'assistant', content: 'ok' }, finish_reason: 'stop', index: 0 }],
+ }), { status: 200, headers: { 'Content-Type': 'application/json' } });
+ vi.mocked(fetch).mockResolvedValue(mockResponse2);
+
+ await sendOpenAICompatMessageNonStream(
+ 'test-key', 'MiniMax-M2.7', [], [{ text: 'test' }],
+ { temperature: 2.0 },
+ new AbortController().signal, vi.fn(), vi.fn(),
+ );
+
+ body = JSON.parse(vi.mocked(fetch).mock.calls[0][1]?.body as string);
+ expect(body.temperature).toBe(1.0);
+ });
+
+ it('should handle abort signal', async () => {
+ const { sendOpenAICompatMessageNonStream } = await import('../services/api/openaiCompatApi');
+
+ const abortController = new AbortController();
+ abortController.abort();
+
+    // No mock Response is needed here: fetch is configured below to reject
+    // with an AbortError before any response body would ever be read, so
+    // constructing one would be dead code. The assertions that follow verify
+    // that an abort is treated as a graceful stop (onComplete with empty
+    // parts and no error) rather than being surfaced through onError.
+ vi.mocked(fetch).mockRejectedValue(new DOMException('The operation was aborted.', 'AbortError'));
+
+ const onError = vi.fn();
+ const onComplete = vi.fn();
+
+ await sendOpenAICompatMessageNonStream(
+ 'test-key', 'MiniMax-M2.7', [], [{ text: 'test' }],
+ { temperature: 0.7 },
+ abortController.signal, onError, onComplete,
+ );
+
+ // Should not call onError when aborted
+ expect(onError).not.toHaveBeenCalled();
+ expect(onComplete).toHaveBeenCalledWith([], '', undefined, undefined, undefined);
+ });
+});
+
+describe('openaiCompatApi - non-streaming integration', () => {
+ beforeEach(() => {
+ vi.stubGlobal('fetch', vi.fn());
+ });
+
+ afterEach(() => {
+ vi.restoreAllMocks();
+ });
+
+ it('should handle network errors', async () => {
+ const { sendOpenAICompatMessageNonStream } = await import('../services/api/openaiCompatApi');
+
+ vi.mocked(fetch).mockRejectedValue(new Error('Network error'));
+
+ const onError = vi.fn();
+ const onComplete = vi.fn();
+
+ await sendOpenAICompatMessageNonStream(
+ 'test-key', 'MiniMax-M2.7', [], [{ text: 'test' }],
+ { temperature: 0.7 },
+ new AbortController().signal, onError, onComplete,
+ );
+
+ expect(onError).toHaveBeenCalledWith(expect.objectContaining({
+ message: 'Network error',
+ }));
+ });
+
+ it('should handle malformed JSON error response', async () => {
+ const { sendOpenAICompatMessageNonStream } = await import('../services/api/openaiCompatApi');
+
+ const mockResponse = new Response('Internal Server Error', {
+ status: 500,
+ headers: { 'Content-Type': 'text/plain' },
+ });
+ vi.mocked(fetch).mockResolvedValue(mockResponse);
+
+ const onError = vi.fn();
+ const onComplete = vi.fn();
+
+ await sendOpenAICompatMessageNonStream(
+ 'test-key', 'MiniMax-M2.7', [], [{ text: 'test' }],
+ { temperature: 0.7 },
+ new AbortController().signal, onError, onComplete,
+ );
+
+ expect(onError).toHaveBeenCalledWith(expect.objectContaining({
+ message: expect.stringContaining('Internal Server Error'),
+ }));
+ });
+
+ it('should handle empty response choices', async () => {
+ const { sendOpenAICompatMessageNonStream } = await import('../services/api/openaiCompatApi');
+
+ const mockResponse = new Response(JSON.stringify({
+ id: 'test-id',
+ object: 'chat.completion',
+ choices: [],
+ }), { status: 200, headers: { 'Content-Type': 'application/json' } });
+ vi.mocked(fetch).mockResolvedValue(mockResponse);
+
+ const onComplete = vi.fn();
+
+ await sendOpenAICompatMessageNonStream(
+ 'test-key', 'MiniMax-M2.7', [], [{ text: 'test' }],
+ { temperature: 0.7 },
+ new AbortController().signal, vi.fn(), onComplete,
+ );
+
+ // Should complete with empty parts
+ expect(onComplete).toHaveBeenCalledWith(
+ [],
+ undefined,
+ undefined,
+ undefined,
+ undefined,
+ );
+ });
+
+ it('should pass topP when provided', async () => {
+ const { sendOpenAICompatMessageNonStream } = await import('../services/api/openaiCompatApi');
+
+ const mockResponse = new Response(JSON.stringify({
+ id: 'test-id',
+ object: 'chat.completion',
+ choices: [{ message: { role: 'assistant', content: 'ok' }, finish_reason: 'stop', index: 0 }],
+ }), { status: 200, headers: { 'Content-Type': 'application/json' } });
+ vi.mocked(fetch).mockResolvedValue(mockResponse);
+
+ await sendOpenAICompatMessageNonStream(
+ 'test-key', 'MiniMax-M2.7', [], [{ text: 'test' }],
+ { temperature: 0.7, topP: 0.9 },
+ new AbortController().signal, vi.fn(), vi.fn(),
+ );
+
+ const body = JSON.parse(vi.mocked(fetch).mock.calls[0][1]?.body as string);
+ expect(body.top_p).toBe(0.9);
+ });
+});
diff --git a/all-model-chat/tests/providerConstants.test.ts b/all-model-chat/tests/providerConstants.test.ts
new file mode 100644
index 00000000..f1042002
--- /dev/null
+++ b/all-model-chat/tests/providerConstants.test.ts
@@ -0,0 +1,82 @@
+import { describe, it, expect } from 'vitest';
+import {
+ isMiniMaxModel,
+ isOpenAICompatModel,
+ getOpenAICompatBaseUrl,
+ MINIMAX_BASE_URL,
+ MINIMAX_MODELS,
+} from '../constants/providerConstants';
+
+describe('providerConstants', () => {
+ describe('MINIMAX_MODELS', () => {
+ it('should contain M2.7 and M2.7-highspeed models', () => {
+ const modelIds = MINIMAX_MODELS.map(m => m.id);
+ expect(modelIds).toContain('MiniMax-M2.7');
+ expect(modelIds).toContain('MiniMax-M2.7-highspeed');
+ });
+
+ it('should have proper display names', () => {
+ const m27 = MINIMAX_MODELS.find(m => m.id === 'MiniMax-M2.7');
+ expect(m27?.name).toBe('MiniMax M2.7');
+ const m27hs = MINIMAX_MODELS.find(m => m.id === 'MiniMax-M2.7-highspeed');
+ expect(m27hs?.name).toBe('MiniMax M2.7 Highspeed');
+ });
+
+ it('should be pinned by default', () => {
+ for (const model of MINIMAX_MODELS) {
+ expect(model.isPinned).toBe(true);
+ }
+ });
+ });
+
+ describe('isMiniMaxModel', () => {
+ it('should return true for MiniMax model IDs', () => {
+ expect(isMiniMaxModel('MiniMax-M2.7')).toBe(true);
+ expect(isMiniMaxModel('MiniMax-M2.7-highspeed')).toBe(true);
+ });
+
+ it('should return false for Gemini model IDs', () => {
+ expect(isMiniMaxModel('gemini-3-flash-preview')).toBe(false);
+ expect(isMiniMaxModel('gemini-2.5-pro')).toBe(false);
+ expect(isMiniMaxModel('gemma-3-27b-it')).toBe(false);
+ });
+
+ it('should return false for empty string', () => {
+ expect(isMiniMaxModel('')).toBe(false);
+ });
+
+ it('should be case sensitive', () => {
+ expect(isMiniMaxModel('minimax-M2.7')).toBe(false);
+ expect(isMiniMaxModel('MINIMAX-M2.7')).toBe(false);
+ });
+ });
+
+ describe('isOpenAICompatModel', () => {
+ it('should return true for MiniMax models', () => {
+ expect(isOpenAICompatModel('MiniMax-M2.7')).toBe(true);
+ expect(isOpenAICompatModel('MiniMax-M2.7-highspeed')).toBe(true);
+ });
+
+ it('should return false for Gemini models', () => {
+ expect(isOpenAICompatModel('gemini-3-flash-preview')).toBe(false);
+ expect(isOpenAICompatModel('gemini-2.5-pro')).toBe(false);
+ });
+ });
+
+ describe('getOpenAICompatBaseUrl', () => {
+ it('should return MiniMax base URL for MiniMax models', () => {
+ expect(getOpenAICompatBaseUrl('MiniMax-M2.7')).toBe(MINIMAX_BASE_URL);
+ expect(getOpenAICompatBaseUrl('MiniMax-M2.7-highspeed')).toBe(MINIMAX_BASE_URL);
+ });
+
+ it('should return empty string for non-MiniMax models', () => {
+ expect(getOpenAICompatBaseUrl('gemini-3-flash-preview')).toBe('');
+ });
+ });
+
+ describe('MINIMAX_BASE_URL', () => {
+ it('should point to MiniMax API', () => {
+ expect(MINIMAX_BASE_URL).toBe('https://api.minimax.io/v1');
+ });
+ });
+});
diff --git a/all-model-chat/types/settings.ts b/all-model-chat/types/settings.ts
index 95b88eb7..50ccfff4 100644
--- a/all-model-chat/types/settings.ts
+++ b/all-model-chat/types/settings.ts
@@ -70,6 +70,7 @@ export interface AppSettings extends ChatSettings {
apiKey: string | null;
apiProxyUrl: string | null;
useApiProxy?: boolean;
+ minimaxApiKey?: string | null;
language: 'en' | 'zh' | 'system';
isStreamingEnabled: boolean;
transcriptionModelId: string;
diff --git a/all-model-chat/utils/modelHelpers.ts b/all-model-chat/utils/modelHelpers.ts
index cd02f88f..019ea325 100644
--- a/all-model-chat/utils/modelHelpers.ts
+++ b/all-model-chat/utils/modelHelpers.ts
@@ -1,9 +1,13 @@
import { ModelOption } from '../types';
import { GEMINI_3_RO_MODELS, STATIC_TTS_MODELS, STATIC_IMAGEN_MODELS, TAB_CYCLE_MODELS, INITIAL_PINNED_MODELS, THINKING_BUDGET_RANGES, MODELS_MANDATORY_THINKING } from '../constants/appConstants';
+import { MINIMAX_MODELS, isMiniMaxModel, isOpenAICompatModel } from '../constants/providerConstants';
import { MediaResolution } from '../types/settings';
import { UsageMetadata } from '@google/genai';
+// Re-export provider helpers for convenience
+export { isMiniMaxModel, isOpenAICompatModel } from '../constants/providerConstants';
+
// --- Model Sorting & Defaults ---
export const sortModels = (models: ModelOption[]): ModelOption[] => {
@@ -57,7 +61,7 @@ export const getDefaultModelOptions = (): ModelOption[] => {
}
return { id, name, isPinned: true };
});
- return sortModels([...pinnedInternalModels, ...STATIC_TTS_MODELS, ...STATIC_IMAGEN_MODELS]);
+ return sortModels([...pinnedInternalModels, ...STATIC_TTS_MODELS, ...STATIC_IMAGEN_MODELS, ...MINIMAX_MODELS]);
};
// --- Helper for Model Capabilities ---
diff --git a/all-model-chat/utils/translations/settings/api.ts b/all-model-chat/utils/translations/settings/api.ts
index 4abd3647..ba4ecab5 100644
--- a/all-model-chat/utils/translations/settings/api.ts
+++ b/all-model-chat/utils/translations/settings/api.ts
@@ -12,4 +12,8 @@ export const apiSettings = {
apiConfig_testFailed: { en: 'Connection Failed', zh: '连接失败' },
apiConfig_vertexExpress: { en: 'Use Vertex AI Express Endpoint', zh: '使用 Vertex AI Express 端点' },
apiConfig_vertexExpress_btn: { en: 'Vertex Express', zh: 'Vertex Express' },
+ settingsMiniMaxConfig: { en: 'MiniMax AI Configuration', zh: 'MiniMax AI 配置' },
+ settingsMiniMaxHelp: { en: 'Enter your MiniMax API key to use MiniMax M2.7 models. Get your key from platform.minimax.chat.', zh: '输入您的 MiniMax API 密钥以使用 MiniMax M2.7 模型。请在 platform.minimax.chat 获取密钥。' },
+ settingsMiniMaxKeyPlaceholder: { en: 'Enter your MiniMax API Key', zh: '输入您的 MiniMax API 密钥' },
+ settingsMiniMaxModelsInfo: { en: 'Available models: MiniMax-M2.7 (204K context), MiniMax-M2.7-highspeed (204K context, faster).', zh: '可用模型:MiniMax-M2.7(204K 上下文),MiniMax-M2.7-highspeed(204K 上下文,更快速)。' },
};
\ No newline at end of file
diff --git a/all-model-chat/vite.config.ts b/all-model-chat/vite.config.ts
index 65d765fe..e2cae9bb 100644
--- a/all-model-chat/vite.config.ts
+++ b/all-model-chat/vite.config.ts
@@ -34,17 +34,21 @@ export default defineConfig(({ mode }) => {
// instance as react-pdf (which is loaded via CDN/importmap).
// This prevents the "Cannot read properties of null (reading 'useReducer')" error.
external: [
- 'react',
- 'react-dom',
- 'react-dom/client',
+ 'react',
+ 'react-dom',
+ 'react-dom/client',
'react/jsx-runtime',
- 'react-pdf',
+ 'react-pdf',
'pdfjs-dist',
'@formkit/auto-animate/react',
'react-virtuoso',
'xlsx'
]
}
+ },
+ test: {
+ environment: 'jsdom',
+ globals: true,
}
};
});
\ No newline at end of file