Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
161 changes: 157 additions & 4 deletions cli.js
Original file line number Diff line number Diff line change
Expand Up @@ -93,6 +93,8 @@ const RECENT_CONFIGS_FILE = path.join(CONFIG_DIR, 'recent-configs.json');
const WORKFLOW_DEFINITIONS_FILE = path.join(CONFIG_DIR, 'codexmate-workflows.json');
const WORKFLOW_RUNS_FILE = path.join(CONFIG_DIR, 'codexmate-workflow-runs.jsonl');
const DEFAULT_CLAUDE_MODEL = 'glm-4.7';
const DEFAULT_MODEL_CONTEXT_WINDOW = 190000;
const DEFAULT_MODEL_AUTO_COMPACT_TOKEN_LIMIT = 185000;
const CODEX_BACKUP_NAME = 'codex-config';

const DEFAULT_MODELS = ['gpt-5.3-codex', 'gpt-5.1-codex-max', 'gpt-4-turbo', 'gpt-4'];
Expand Down Expand Up @@ -226,6 +228,8 @@ function resolveWebHost(options = {}) {

const EMPTY_CONFIG_FALLBACK_TEMPLATE = `model = "gpt-5.3-codex"
model_reasoning_effort = "high"
model_context_window = ${DEFAULT_MODEL_CONTEXT_WINDOW}
model_auto_compact_token_limit = ${DEFAULT_MODEL_AUTO_COMPACT_TOKEN_LIMIT}
disable_response_storage = true
approval_policy = "never"
sandbox_mode = "danger-full-access"
Expand Down Expand Up @@ -3168,6 +3172,8 @@ function buildDefaultConfigContent(initializedAt) {

model_provider = "openai"
model = "${defaultModel}"
model_context_window = ${DEFAULT_MODEL_CONTEXT_WINDOW}
model_auto_compact_token_limit = ${DEFAULT_MODEL_AUTO_COMPACT_TOKEN_LIMIT}

[model_providers.openai]
name = "openai"
Expand Down Expand Up @@ -3333,6 +3339,45 @@ function applyReasoningEffortToTemplate(template, reasoningEffort) {
return content;
}

/**
 * Coerce an arbitrary value to a positive safe integer, or null.
 *
 * Accepts numbers and strings whose trimmed textual form is a run of
 * decimal digits (no sign, no decimal point, no exponent). Anything
 * else — null/undefined, empty/blank strings, zero, negatives,
 * fractions, unsafe magnitudes — normalizes to null.
 *
 * @param {*} value - Candidate value (typically from user params or TOML).
 * @returns {number|null} The parsed positive integer, or null when invalid.
 */
function normalizePositiveIntegerParam(value) {
  if (value === undefined || value === null) {
    return null;
  }
  // String() of a number never carries whitespace, so a single
  // trimmed conversion covers numbers, strings, and everything else.
  const text = String(value).trim();
  if (!/^\d+$/.test(text)) {
    return null;
  }
  const parsed = Number.parseInt(text, 10);
  return Number.isSafeInteger(parsed) && parsed > 0 ? parsed : null;
}

/**
 * Prepend `key = <value>` to a TOML template, replacing any existing
 * assignment of that key.
 *
 * Preserves a leading UTF-8 BOM and the template's dominant line ending
 * (CRLF when any CRLF is present, LF otherwise). Existing assignments of
 * the key are matched case-insensitively and removed, and any blank
 * lines that removal leaves at the top of the template are stripped.
 * When `key` is falsy or `value` does not normalize to a positive
 * integer, the template is returned unchanged.
 *
 * @param {string} template - TOML template text ('' substituted for non-strings).
 * @param {string} key - Top-level TOML key to set.
 * @param {*} value - Candidate value; validated via normalizePositiveIntegerParam.
 * @returns {string} The updated template text.
 */
function applyPositiveIntegerConfigToTemplate(template, key, value) {
  const source = typeof template === 'string' ? template : '';
  const normalized = normalizePositiveIntegerParam(value);
  if (!key || normalized === null) {
    return source;
  }

  const hasBom = source.charCodeAt(0) === 0xFEFF;
  const eol = source.includes('\r\n') ? '\r\n' : '\n';
  let body = hasBom ? source.slice(1) : source;

  // Remove every existing `key = ...` line (any case), then drop the
  // blank lines the removal may have exposed at the top.
  const escapedKey = key.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
  const keyLine = new RegExp(`^\\s*${escapedKey}\\s*=\\s*[^\\n]*\\n?`, 'gmi');
  body = body.replace(keyLine, '');
  body = body.replace(new RegExp(`^(?:[\\t ]*${eol})+`), '');

  const bom = hasBom ? '\uFEFF' : '';
  return `${bom}${key} = ${normalized}${eol}${body}`;
}

function getConfigTemplate(params = {}) {
let content = EMPTY_CONFIG_FALLBACK_TEMPLATE;
if (fs.existsSync(CONFIG_FILE)) {
Expand All @@ -3343,6 +3388,20 @@ function getConfigTemplate(params = {}) {
}
} catch (e) {}
}
if (
params.modelAutoCompactTokenLimit !== undefined
&& params.modelAutoCompactTokenLimit !== null
&& normalizePositiveIntegerParam(params.modelAutoCompactTokenLimit) === null
) {
return { error: 'modelAutoCompactTokenLimit must be a positive integer' };
}
if (
params.modelContextWindow !== undefined
&& params.modelContextWindow !== null
&& normalizePositiveIntegerParam(params.modelContextWindow) === null
) {
return { error: 'modelContextWindow must be a positive integer' };
}
const selectedProvider = typeof params.provider === 'string' ? params.provider.trim() : '';
const selectedModel = typeof params.model === 'string' ? params.model.trim() : '';
let template = normalizeTopLevelConfigWithTemplate(content, selectedProvider, selectedModel);
Expand All @@ -3352,11 +3411,54 @@ function getConfigTemplate(params = {}) {
if (typeof params.reasoningEffort === 'string') {
template = applyReasoningEffortToTemplate(template, params.reasoningEffort);
}
if (!/^\s*model_auto_compact_token_limit\s*=.*$/m.test(template)) {
template = applyPositiveIntegerConfigToTemplate(
template,
'model_auto_compact_token_limit',
DEFAULT_MODEL_AUTO_COMPACT_TOKEN_LIMIT
);
}
if (!/^\s*model_context_window\s*=.*$/m.test(template)) {
template = applyPositiveIntegerConfigToTemplate(
template,
'model_context_window',
DEFAULT_MODEL_CONTEXT_WINDOW
);
}
if (params.modelAutoCompactTokenLimit !== undefined) {
template = applyPositiveIntegerConfigToTemplate(
template,
'model_auto_compact_token_limit',
params.modelAutoCompactTokenLimit
);
}
if (params.modelContextWindow !== undefined) {
template = applyPositiveIntegerConfigToTemplate(
template,
'model_context_window',
params.modelContextWindow
);
}
return {
template
};
}

/**
 * Read a positive-integer value from a parsed config object.
 *
 * Returns the normalized positive integer stored under `key`, '' when
 * the value is missing or invalid, and — for the two known context-budget
 * keys — the built-in default when the key is absent and defaults are
 * enabled.
 *
 * @param {object} config - Parsed TOML config object.
 * @param {string} key - Top-level config key to read.
 * @param {{useDefaultsWhenMissing?: boolean}} [options] - Set
 *   `useDefaultsWhenMissing: false` to suppress the built-in defaults
 *   (used when the config failed to load and defaults would be misleading).
 * @returns {number|''} Positive integer, or '' when unavailable/invalid.
 */
function readPositiveIntegerConfigValue(config, key, options = {}) {
  // Guard against a non-object third argument (callers may pass null).
  const opts = options && typeof options === 'object' ? options : {};
  const useDefaultsWhenMissing = opts.useDefaultsWhenMissing !== false;
  if (!config || typeof config !== 'object' || !key) {
    return '';
  }
  const raw = config[key];
  // Only the two context-budget keys have built-in fallbacks.
  if (raw === undefined && useDefaultsWhenMissing) {
    if (key === 'model_context_window') return DEFAULT_MODEL_CONTEXT_WINDOW;
    if (key === 'model_auto_compact_token_limit') return DEFAULT_MODEL_AUTO_COMPACT_TOKEN_LIMIT;
  }
  const normalized = normalizePositiveIntegerParam(raw);
  return normalized === null ? '' : normalized;
}

function applyConfigTemplate(params = {}) {
const template = typeof params.template === 'string' ? params.template : '';
if (!template.trim()) {
Expand All @@ -3370,6 +3472,20 @@ function applyConfigTemplate(params = {}) {
return { error: `模板 TOML 解析失败: ${e.message}` };
}

if (
Object.prototype.hasOwnProperty.call(parsed, 'model_context_window')
&& normalizePositiveIntegerParam(parsed.model_context_window) === null
) {
return { error: '模板中的 model_context_window 必须是正整数' };
}

if (
Object.prototype.hasOwnProperty.call(parsed, 'model_auto_compact_token_limit')
&& normalizePositiveIntegerParam(parsed.model_auto_compact_token_limit) === null
) {
return { error: '模板中的 model_auto_compact_token_limit 必须是正整数' };
}

if (!parsed.model_provider || typeof parsed.model_provider !== 'string') {
return { error: '模板缺少 model_provider' };
}
Expand Down Expand Up @@ -9976,22 +10092,38 @@ function createWebServer({ htmlPath, assetsDir, webDir, host, port, openBrowser
let result;

switch (action) {
case 'status':
case 'status': {
const statusConfigResult = readConfigOrVirtualDefault();
const config = statusConfigResult.config;
const serviceTier = typeof config.service_tier === 'string' ? config.service_tier.trim() : '';
const modelReasoningEffort = typeof config.model_reasoning_effort === 'string' ? config.model_reasoning_effort.trim() : '';
const budgetReadOptions = {
useDefaultsWhenMissing: !hasConfigLoadError(statusConfigResult)
};
const modelContextWindow = readPositiveIntegerConfigValue(
config,
'model_context_window',
budgetReadOptions
);
const modelAutoCompactTokenLimit = readPositiveIntegerConfigValue(
config,
'model_auto_compact_token_limit',
budgetReadOptions
);
result = {
provider: config.model_provider || '未设置',
model: config.model || '未设置',
serviceTier,
modelReasoningEffort,
modelContextWindow,
modelAutoCompactTokenLimit,
configReady: !statusConfigResult.isVirtual,
configErrorType: statusConfigResult.errorType || '',
configNotice: statusConfigResult.reason || '',
initNotice: consumeInitNotice()
};
break;
}
case 'install-status':
result = buildInstallStatusReport();
break;
Expand Down Expand Up @@ -11464,11 +11596,26 @@ function buildMcpStatusPayload() {
const config = statusConfigResult.config;
const serviceTier = typeof config.service_tier === 'string' ? config.service_tier.trim() : '';
const modelReasoningEffort = typeof config.model_reasoning_effort === 'string' ? config.model_reasoning_effort.trim() : '';
const budgetReadOptions = {
useDefaultsWhenMissing: !hasConfigLoadError(statusConfigResult)
};
const modelContextWindow = readPositiveIntegerConfigValue(
config,
'model_context_window',
budgetReadOptions
);
const modelAutoCompactTokenLimit = readPositiveIntegerConfigValue(
config,
'model_auto_compact_token_limit',
budgetReadOptions
);
return {
provider: config.model_provider || '未设置',
model: config.model || '未设置',
serviceTier,
modelReasoningEffort,
modelContextWindow,
modelAutoCompactTokenLimit,
configReady: !statusConfigResult.isVirtual,
configErrorType: statusConfigResult.errorType || '',
configNotice: statusConfigResult.reason || '',
Expand Down Expand Up @@ -11566,6 +11713,8 @@ const BUILTIN_WORKFLOW_DEFINITIONS = Object.freeze({
model: { type: 'string' },
serviceTier: { type: 'string' },
reasoningEffort: { type: 'string' },
modelContextWindow: { type: ['string', 'number'] },
modelAutoCompactTokenLimit: { type: ['string', 'number'] },
apply: { type: 'boolean' }
},
required: ['provider'],
Expand All @@ -11580,7 +11729,9 @@ const BUILTIN_WORKFLOW_DEFINITIONS = Object.freeze({
provider: '{{input.provider}}',
model: '{{input.model}}',
serviceTier: '{{input.serviceTier}}',
reasoningEffort: '{{input.reasoningEffort}}'
reasoningEffort: '{{input.reasoningEffort}}',
modelContextWindow: '{{input.modelContextWindow}}',
modelAutoCompactTokenLimit: '{{input.modelAutoCompactTokenLimit}}'
}
},
{
Expand Down Expand Up @@ -12149,15 +12300,17 @@ function createMcpTools(options = {}) {

pushTool({
name: 'codexmate.config.template.get',
description: 'Get Codex config template with optional provider/model/service tier/reasoning effort.',
description: 'Get Codex config template with optional provider/model/service tier/reasoning effort/context budget.',
readOnly: true,
inputSchema: {
type: 'object',
properties: {
provider: { type: 'string' },
model: { type: 'string' },
serviceTier: { type: 'string' },
reasoningEffort: { type: 'string' }
reasoningEffort: { type: 'string' },
modelContextWindow: { type: ['string', 'number'] },
modelAutoCompactTokenLimit: { type: ['string', 'number'] }
},
additionalProperties: false
},
Expand Down
75 changes: 73 additions & 2 deletions tests/e2e/test-config.js
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,10 @@ module.exports = async function testConfig(ctx) {
assert(typeof apiStatus.configReady === 'boolean', 'api status configReady missing');
assert('modelReasoningEffort' in apiStatus, 'api status modelReasoningEffort missing');
assert('serviceTier' in apiStatus, 'api status serviceTier missing');
assert('modelContextWindow' in apiStatus, 'api status modelContextWindow missing');
assert('modelAutoCompactTokenLimit' in apiStatus, 'api status modelAutoCompactTokenLimit missing');
assert(apiStatus.modelContextWindow === 190000, 'api status modelContextWindow mismatch');
assert(apiStatus.modelAutoCompactTokenLimit === 185000, 'api status modelAutoCompactTokenLimit mismatch');

// ========== List API Tests ==========
const apiList = await api('list');
Expand Down Expand Up @@ -83,26 +87,78 @@ module.exports = async function testConfig(ctx) {
assert(typeof templateReasoningXhigh.template === 'string', 'get-config-template(reasoning xhigh) missing template');
assert(/^\s*model_reasoning_effort\s*=\s*"xhigh"\s*$/m.test(templateReasoningXhigh.template), 'get-config-template(reasoning xhigh) missing model_reasoning_effort');

// ========== Get Config Template Tests - Context Budget ==========
const templateContextBudget = await api('get-config-template', {
provider: 'shadow',
model: 'shadow-model',
modelContextWindow: 200000,
modelAutoCompactTokenLimit: 195000
});
assert(typeof templateContextBudget.template === 'string', 'get-config-template(context budget) missing template');
assert(templateContextBudget.template.includes('model_provider = "shadow"'), 'get-config-template(context budget) missing provider override');
assert(templateContextBudget.template.includes('model = "shadow-model"'), 'get-config-template(context budget) missing model override');
assert(/^\s*model_context_window\s*=\s*200000\s*$/m.test(templateContextBudget.template), 'get-config-template(context budget) missing model_context_window');
assert(/^\s*model_auto_compact_token_limit\s*=\s*195000\s*$/m.test(templateContextBudget.template), 'get-config-template(context budget) missing model_auto_compact_token_limit');

// ========== Get Config Template Tests - Combined ==========
const templateCombined = await api('get-config-template', {
provider: 'shadow',
model: 'shadow-model',
serviceTier: 'fast',
reasoningEffort: 'high'
reasoningEffort: 'high',
modelContextWindow: 190000,
modelAutoCompactTokenLimit: 185000
});
assert(typeof templateCombined.template === 'string', 'get-config-template(combined) missing template');
assert(/^\s*service_tier\s*=\s*"fast"\s*$/m.test(templateCombined.template), 'get-config-template(combined) missing service_tier');
assert(/^\s*model_reasoning_effort\s*=\s*"high"\s*$/m.test(templateCombined.template), 'get-config-template(combined) missing model_reasoning_effort');
assert(/^\s*model_context_window\s*=\s*190000\s*$/m.test(templateCombined.template), 'get-config-template(combined) missing model_context_window');
assert(/^\s*model_auto_compact_token_limit\s*=\s*185000\s*$/m.test(templateCombined.template), 'get-config-template(combined) missing model_auto_compact_token_limit');

const templateCombinedXhigh = await api('get-config-template', {
provider: 'shadow',
model: 'shadow-model',
serviceTier: 'fast',
reasoningEffort: 'xhigh'
reasoningEffort: 'xhigh',
modelContextWindow: 210000,
modelAutoCompactTokenLimit: 200000
});
assert(typeof templateCombinedXhigh.template === 'string', 'get-config-template(combined xhigh) missing template');
assert(/^\s*service_tier\s*=\s*"fast"\s*$/m.test(templateCombinedXhigh.template), 'get-config-template(combined xhigh) missing service_tier');
assert(/^\s*model_reasoning_effort\s*=\s*"xhigh"\s*$/m.test(templateCombinedXhigh.template), 'get-config-template(combined xhigh) missing model_reasoning_effort');
assert(/^\s*model_context_window\s*=\s*210000\s*$/m.test(templateCombinedXhigh.template), 'get-config-template(combined xhigh) missing model_context_window');
assert(/^\s*model_auto_compact_token_limit\s*=\s*200000\s*$/m.test(templateCombinedXhigh.template), 'get-config-template(combined xhigh) missing model_auto_compact_token_limit');

// ========== Apply Config Template Validation Tests ==========
const invalidContextBudgetApply = await api('apply-config-template', {
template: `model_provider = "shadow"
model = "shadow-model"
model_context_window = 0

[model_providers.shadow]
base_url = "https://example.test/v1"
preferred_auth_method = "shadow-key"
`
});
assert(
invalidContextBudgetApply.error === '模板中的 model_context_window 必须是正整数',
'apply-config-template should reject invalid model_context_window'
);

const invalidAutoCompactApply = await api('apply-config-template', {
template: `model_provider = "shadow"
model = "shadow-model"
model_auto_compact_token_limit = "abc"

[model_providers.shadow]
base_url = "https://example.test/v1"
preferred_auth_method = "shadow-key"
`
});
assert(
invalidAutoCompactApply.error === '模板中的 model_auto_compact_token_limit 必须是正整数',
'apply-config-template should reject invalid model_auto_compact_token_limit'
);

// ========== Export Config Tests ==========
const exportResult = await api('export-config', { includeKeys: true });
Expand Down Expand Up @@ -337,6 +393,21 @@ module.exports = async function testConfig(ctx) {
await waitForServer(legacyPort);

const legacyApi = (action, params) => postJson(legacyPort, { action, params }, 2000);
const legacyStatus = await legacyApi('status');
assert(legacyStatus.modelContextWindow === 190000, 'legacy status should default modelContextWindow');
assert(
legacyStatus.modelAutoCompactTokenLimit === 185000,
'legacy status should default modelAutoCompactTokenLimit'
);
const legacyTemplateDefaults = await legacyApi('get-config-template', {});
assert(
/^\s*model_context_window\s*=\s*190000\s*$/m.test(legacyTemplateDefaults.template),
'legacy get-config-template should restore default model_context_window'
);
assert(
/^\s*model_auto_compact_token_limit\s*=\s*185000\s*$/m.test(legacyTemplateDefaults.template),
'legacy get-config-template should restore default model_auto_compact_token_limit'
);
const legacyAddDup = await legacyApi('add-provider', {
name: 'foo.bar',
url: 'https://dup.example.com/v1',
Expand Down
Loading
Loading