Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions typescript/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@
},
"scripts": {
"build": "tsc -p src",
"test": "npm run build && node --test tests/*.mjs",
"build-all": "npm run build --workspaces",
"prepare": "npm run build-all",
"prepublishOnly": "node -e \"require('fs').copyFileSync('../SECURITY.md','SECURITY.md')\"",
Expand Down
79 changes: 78 additions & 1 deletion typescript/src/model.ts
Original file line number Diff line number Diff line change
Expand Up @@ -83,6 +83,8 @@ export interface TypeChatLanguageModel {
* If an `OPENAI_API_KEY` environment variable exists, the `createOpenAILanguageModel` function
* is used to create the instance. The `OPENAI_ENDPOINT` and `OPENAI_MODEL` environment variables
* must also be defined or an exception will be thrown.
* Set `OPENAI_USE_RESPONSES_API=true` to opt-in to the newer OpenAI Responses API
* (`https://api.openai.com/v1/responses`) instead of the default Chat Completions API.
*
* If an `AZURE_OPENAI_API_KEY` environment variable exists, the `createAzureOpenAILanguageModel` function
* is used to create the instance. The `AZURE_OPENAI_ENDPOINT` environment variable must also be defined
Expand All @@ -95,8 +97,12 @@ export function createLanguageModel(env: Record<string, string | undefined>): Ty
if (env.OPENAI_API_KEY) {
const apiKey = env.OPENAI_API_KEY ?? missingEnvironmentVariable("OPENAI_API_KEY");
const model = env.OPENAI_MODEL ?? missingEnvironmentVariable("OPENAI_MODEL");
const endPoint = env.OPENAI_ENDPOINT ?? "https://api.openai.com/v1/chat/completions";
const org = env.OPENAI_ORGANIZATION ?? "";
if (env.OPENAI_USE_RESPONSES_API === "true") {
const endPoint = env.OPENAI_ENDPOINT ?? "https://api.openai.com/v1/responses";
return createOpenAIResponsesLanguageModel(apiKey, model, endPoint, org);
}
const endPoint = env.OPENAI_ENDPOINT ?? "https://api.openai.com/v1/chat/completions";
return createOpenAILanguageModel(apiKey, model, endPoint, org);
}
if (env.AZURE_OPENAI_API_KEY) {
Expand Down Expand Up @@ -141,6 +147,24 @@ export function createAzureOpenAILanguageModel(apiKey: string, endPoint: string)
return createFetchLanguageModel(endPoint, headers, {});
}

/**
 * Creates a language model encapsulation of an OpenAI Responses API endpoint.
 * This targets the newer `/v1/responses` endpoint introduced by OpenAI; callers
 * that need the classic `/v1/chat/completions` endpoint should use
 * `createOpenAILanguageModel` instead.
 * @param apiKey The OpenAI API key.
 * @param model The model name.
 * @param endPoint The URL of the OpenAI Responses API endpoint. Defaults to "https://api.openai.com/v1/responses".
 * @param org The OpenAI organization id.
 * @returns An instance of `TypeChatLanguageModel`.
 */
export function createOpenAIResponsesLanguageModel(apiKey: string, model: string, endPoint = "https://api.openai.com/v1/responses", org = ""): TypeChatLanguageModel {
    return createResponsesFetchLanguageModel(
        endPoint,
        {
            "Authorization": `Bearer ${apiKey}`,
            "OpenAI-Organization": org
        },
        { model }
    );
}

/**
* Common OpenAI REST API endpoint encapsulation using the fetch API.
*/
Expand Down Expand Up @@ -187,6 +211,59 @@ function createFetchLanguageModel(url: string, headers: object, defaultParams: o
}
}

/**
 * OpenAI Responses API endpoint encapsulation using the fetch API.
 * Handles the request/response format used by `/v1/responses`, which differs
 * from Chat Completions: the prompt is sent as `input` and the reply text is
 * nested in an `output` array of typed items.
 */
function createResponsesFetchLanguageModel(url: string, headers: object, defaultParams: object) {
    // Shape of each item in the `output` array of a Responses API reply.
    type ResponsesAPIOutputItem = {
        type: string;
        role?: string;
        content: { type: string; text: string }[];
    };
    const model: TypeChatLanguageModel = {
        complete
    };
    return model;

    async function complete(prompt: string | PromptSection[]) {
        // Retry settings are read per call so callers may tune them after creation.
        const maxAttempts = model.retryMaxAttempts ?? 3;
        const pauseMs = model.retryPauseMs ?? 1000;
        // A plain string or a PromptSection[] is passed through as `input` unchanged.
        const body = JSON.stringify({
            ...defaultParams,
            input: prompt,
            temperature: 0,
        });
        for (let attempt = 0; ; attempt++) {
            const response = await fetch(url, {
                method: "POST",
                body,
                headers: {
                    "content-type": "application/json",
                    ...headers
                }
            });
            if (response.ok) {
                const json = await response.json() as { output: ResponsesAPIOutputItem[] };
                // The assistant reply is the first "message" item; its text lives
                // in a content part of type "output_text".
                const message = json.output?.find(item => item.type === "message");
                const textPart = message?.content?.find(part => part.type === "output_text");
                return textPart?.text !== undefined
                    ? success(textPart.text)
                    : error(`REST API unexpected response format: ${JSON.stringify(json)}`);
            }
            // Give up on non-transient errors or once the retry budget is spent.
            if (!isTransientHttpError(response.status) || attempt >= maxAttempts) {
                return error(`REST API error ${response.status}: ${response.statusText}`);
            }
            await sleep(pauseMs);
        }
    }
}

/**
 * Returns true if the given HTTP status code represents a transient error.
*/
Expand Down
260 changes: 260 additions & 0 deletions typescript/tests/model.test.mjs
Original file line number Diff line number Diff line change
@@ -0,0 +1,260 @@
/**
* Tests for model.ts - verifies backward compatibility of Chat Completions API
* and correct behavior of the new Responses API support.
*
* These tests use mocked fetch to avoid requiring real API keys.
*/

import { test, describe, before, after } from "node:test";
import assert from "node:assert/strict";

// Load the compiled module from dist
import { createOpenAILanguageModel, createOpenAIResponsesLanguageModel, createLanguageModel } from "../dist/index.js";

// ---------------------------------------------------------------------------
// Helpers: build mock Response objects
// ---------------------------------------------------------------------------

// Fake fetch Response shaped like a successful Chat Completions reply,
// carrying the given assistant message content.
function makeChatCompletionsResponse(content) {
    return {
        ok: true,
        status: 200,
        json: async () => ({
            id: "chatcmpl-123",
            object: "chat.completion",
            choices: [{ message: { role: "assistant", content } }],
        }),
    };
}

// Fake fetch Response shaped like a successful Responses API reply:
// one assistant "message" output item with a single "output_text" part.
function makeResponsesAPIResponse(text) {
    return {
        ok: true,
        status: 200,
        json: async () => ({
            id: "resp-123",
            object: "response",
            output: [
                {
                    type: "message",
                    role: "assistant",
                    content: [{ type: "output_text", text }],
                },
            ],
        }),
    };
}

// Fake fetch Response representing an HTTP failure with the given status line.
function makeErrorResponse(status, statusText) {
    return {
        ok: false,
        status,
        statusText,
    };
}

// ---------------------------------------------------------------------------
// Mock fetch utility
// ---------------------------------------------------------------------------

// Mutable mock-fetch state shared by the tests below.
let capturedRequests = [];   // every { url, options } pair passed to the mock fetch
let mockResponses = [];      // queued mock responses, consumed FIFO
let savedFetch;              // the real globalThis.fetch, restored by teardownFetch

/**
 * Replaces globalThis.fetch with a mock that records each request and returns
 * the next queued response. The real fetch is saved (once) so teardownFetch
 * can restore it instead of deleting Node's built-in implementation.
 */
function setupFetch(responses) {
    capturedRequests = [];
    mockResponses = [...responses];
    if (savedFetch === undefined) {
        // May itself be undefined on runtimes without a built-in fetch.
        savedFetch = globalThis.fetch;
    }
    globalThis.fetch = async (url, options) => {
        capturedRequests.push({ url, options });
        const resp = mockResponses.shift();
        if (!resp) throw new Error("No mock response configured");
        return resp;
    };
}

/**
 * Restores the real fetch (the previous `delete globalThis.fetch` removed
 * Node's built-in fetch entirely) and clears all mock state.
 */
function teardownFetch() {
    if (savedFetch !== undefined) {
        globalThis.fetch = savedFetch;
    } else {
        delete globalThis.fetch;
    }
    savedFetch = undefined;
    capturedRequests = [];
    mockResponses = [];
}

// ---------------------------------------------------------------------------
// Chat Completions API (backward compatibility)
// ---------------------------------------------------------------------------

describe("createOpenAILanguageModel (Chat Completions API)", () => {
    after(teardownFetch);

    test("uses /chat/completions endpoint by default", async () => {
        setupFetch([makeChatCompletionsResponse("Hello!")]);
        const lm = createOpenAILanguageModel("sk-test", "gpt-4");
        const outcome = await lm.complete("Say hello");
        assert.equal(outcome.success, true);
        assert.equal(outcome.data, "Hello!");
        const requestUrl = capturedRequests[0].url;
        assert.ok(requestUrl.includes("/chat/completions"), "Expected /chat/completions URL");
    });

    test("sends messages field in request body", async () => {
        setupFetch([makeChatCompletionsResponse("Hi!")]);
        const lm = createOpenAILanguageModel("sk-test", "gpt-4");
        await lm.complete("Say hi");
        const payload = JSON.parse(capturedRequests[0].options.body);
        assert.ok(Array.isArray(payload.messages), "Expected messages array");
        assert.equal(payload.messages[0].role, "user");
        assert.equal(payload.messages[0].content, "Say hi");
    });

    test("parses string content from choices[0].message.content", async () => {
        setupFetch([makeChatCompletionsResponse("The answer is 42.")]);
        const lm = createOpenAILanguageModel("sk-test", "gpt-4");
        const outcome = await lm.complete("What is the answer?");
        assert.equal(outcome.success, true);
        assert.equal(outcome.data, "The answer is 42.");
    });

    test("accepts PromptSection array as input", async () => {
        setupFetch([makeChatCompletionsResponse("OK")]);
        const lm = createOpenAILanguageModel("sk-test", "gpt-4");
        const sections = [
            { role: "system", content: "You are helpful." },
            { role: "user", content: "Hello" },
        ];
        const outcome = await lm.complete(sections);
        assert.equal(outcome.success, true);
        const payload = JSON.parse(capturedRequests[0].options.body);
        assert.equal(payload.messages.length, 2);
    });

    test("returns error on non-transient HTTP error", async () => {
        setupFetch([makeErrorResponse(401, "Unauthorized")]);
        const lm = createOpenAILanguageModel("invalid-key", "gpt-4");
        const outcome = await lm.complete("test");
        assert.equal(outcome.success, false);
        assert.ok(outcome.message.includes("401"));
    });
});

// ---------------------------------------------------------------------------
// Responses API
// ---------------------------------------------------------------------------

describe("createOpenAIResponsesLanguageModel (Responses API)", () => {
    after(teardownFetch);

    test("uses /responses endpoint by default", async () => {
        setupFetch([makeResponsesAPIResponse("Hello!")]);
        const lm = createOpenAIResponsesLanguageModel("sk-test", "gpt-4");
        const outcome = await lm.complete("Say hello");
        assert.equal(outcome.success, true);
        assert.equal(outcome.data, "Hello!");
        const requestUrl = capturedRequests[0].url;
        assert.ok(requestUrl.includes("/responses"), "Expected /responses URL");
    });

    test("sends input field (not messages) in request body", async () => {
        setupFetch([makeResponsesAPIResponse("Hi!")]);
        const lm = createOpenAIResponsesLanguageModel("sk-test", "gpt-4");
        await lm.complete("Say hi");
        const payload = JSON.parse(capturedRequests[0].options.body);
        assert.ok("input" in payload, "Expected input field in request body");
        assert.ok(!("messages" in payload), "Should NOT have messages field");
    });

    test("parses text from output[0].content[0].text", async () => {
        setupFetch([makeResponsesAPIResponse("The answer is 42.")]);
        const lm = createOpenAIResponsesLanguageModel("sk-test", "gpt-4");
        const outcome = await lm.complete("What is the answer?");
        assert.equal(outcome.success, true);
        assert.equal(outcome.data, "The answer is 42.");
    });

    test("accepts custom endpoint URL", async () => {
        setupFetch([makeResponsesAPIResponse("Custom OK")]);
        const endpoint = "https://custom.endpoint.com/v1/responses";
        const lm = createOpenAIResponsesLanguageModel("sk-test", "gpt-4", endpoint);
        await lm.complete("test");
        assert.equal(capturedRequests[0].url, endpoint);
    });

    test("returns error on non-transient HTTP error", async () => {
        setupFetch([makeErrorResponse(401, "Unauthorized")]);
        const lm = createOpenAIResponsesLanguageModel("invalid-key", "gpt-4");
        const outcome = await lm.complete("test");
        assert.equal(outcome.success, false);
        assert.ok(outcome.message.includes("401"));
    });

    test("returns error on unexpected response format", async () => {
        // A well-formed but empty `output` array should surface a format error.
        const emptyOutput = {
            ok: true,
            status: 200,
            json: () => Promise.resolve({ output: [] }),
        };
        setupFetch([emptyOutput]);
        const lm = createOpenAIResponsesLanguageModel("sk-test", "gpt-4");
        const outcome = await lm.complete("test");
        assert.equal(outcome.success, false);
        assert.ok(outcome.message.includes("unexpected response format"));
    });
});

// ---------------------------------------------------------------------------
// createLanguageModel env-var routing
// ---------------------------------------------------------------------------

describe("createLanguageModel environment variable routing", () => {
    after(teardownFetch);

    test("defaults to Chat Completions API when OPENAI_USE_RESPONSES_API is not set", async () => {
        setupFetch([makeChatCompletionsResponse("OK")]);
        const lm = createLanguageModel({ OPENAI_API_KEY: "sk-test", OPENAI_MODEL: "gpt-4" });
        const outcome = await lm.complete("test");
        assert.equal(outcome.success, true);
        assert.ok(capturedRequests[0].url.includes("/chat/completions"));
    });

    test("uses Responses API when OPENAI_USE_RESPONSES_API=true", async () => {
        setupFetch([makeResponsesAPIResponse("OK")]);
        const lm = createLanguageModel({
            OPENAI_API_KEY: "sk-test",
            OPENAI_MODEL: "gpt-4",
            OPENAI_USE_RESPONSES_API: "true",
        });
        const outcome = await lm.complete("test");
        assert.equal(outcome.success, true);
        assert.ok(capturedRequests[0].url.includes("/responses"));
    });

    test("OPENAI_ENDPOINT overrides default endpoint (Chat Completions path)", async () => {
        setupFetch([makeChatCompletionsResponse("OK")]);
        const endpoint = "https://proxy.example.com/v1/chat/completions";
        const lm = createLanguageModel({
            OPENAI_API_KEY: "sk-test",
            OPENAI_MODEL: "gpt-4",
            OPENAI_ENDPOINT: endpoint,
        });
        await lm.complete("test");
        assert.equal(capturedRequests[0].url, endpoint);
    });

    test("OPENAI_ENDPOINT overrides default when Responses API is selected", async () => {
        setupFetch([makeResponsesAPIResponse("OK")]);
        const endpoint = "https://proxy.example.com/v1/responses";
        const lm = createLanguageModel({
            OPENAI_API_KEY: "sk-test",
            OPENAI_MODEL: "gpt-4",
            OPENAI_USE_RESPONSES_API: "true",
            OPENAI_ENDPOINT: endpoint,
        });
        await lm.complete("test");
        assert.equal(capturedRequests[0].url, endpoint);
    });

    test("throws when OPENAI_API_KEY and AZURE_OPENAI_API_KEY are both missing", () => {
        assert.throws(() => createLanguageModel({}), /Missing environment variable/);
    });
});