consumer) {
+ return new OpenAIClientImplWrapper(
+ (OpenAIClientImpl) delegate.withOptions(consumer), deploymentModel);
+ }
+
+ @Nonnull
+ @Override
+ public ChatService chat() {
+ return new AiCoreChatService(delegate.chat(), deploymentModel);
+ }
+
+ @Override
+ @Nonnull
+ public ResponseService responses() {
+ return new AiCoreResponseService(delegate.responses(), deploymentModel);
+ }
+
+ /**
+ * Methods that are delegated to the underlying OpenAI client.
+ *
+ * Note: Most of these methods will throw {@link UnsupportedOperationException} at runtime due
+ * to endpoint constraints enforced in {@code AiCoreHttpClientImpl}.
+ *
+ * @see AiCoreHttpClientImpl#validateAllowedEndpoint
+ */
+ private interface OtherMethods {
+ OpenAIClientAsync async();
+
+ OpenAIClient.WithRawResponse withRawResponse();
+
+ OpenAIClient withOptions(Consumer modifier);
+
+ CompletionService completions();
+
+ EmbeddingService embeddings();
+
+ FileService files();
+
+ ImageService images();
+
+ AudioService audio();
+
+ ModerationService moderations();
+
+ ModelService models();
+
+ FineTuningService fineTuning();
+
+ GraderService graders();
+
+ VectorStoreService vectorStores();
+
+ WebhookService webhooks();
+
+ BetaService beta();
+
+ BatchService batches();
+
+ UploadService uploads();
+
+ RealtimeService realtime();
+
+ ConversationService conversations();
+
+ EvalService evals();
+
+ ContainerService containers();
+
+ SkillService skills();
+
+ VideoService videos();
+
+ void close();
+ }
+}
diff --git a/foundation-models/openai/src/test/java/com/sap/ai/sdk/foundationmodels/openai/AiCoreOpenAiClientTest.java b/foundation-models/openai/src/test/java/com/sap/ai/sdk/foundationmodels/openai/AiCoreOpenAiClientTest.java
index 26687df52..9a3e8667e 100644
--- a/foundation-models/openai/src/test/java/com/sap/ai/sdk/foundationmodels/openai/AiCoreOpenAiClientTest.java
+++ b/foundation-models/openai/src/test/java/com/sap/ai/sdk/foundationmodels/openai/AiCoreOpenAiClientTest.java
@@ -1,19 +1,25 @@
package com.sap.ai.sdk.foundationmodels.openai;
import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
import com.github.tomakehurst.wiremock.junit5.WireMockRuntimeInfo;
import com.github.tomakehurst.wiremock.junit5.WireMockTest;
import com.openai.client.OpenAIClient;
+import com.openai.models.ChatModel;
+import com.openai.models.chat.completions.ChatCompletion;
+import com.openai.models.chat.completions.ChatCompletionCreateParams;
import com.openai.models.responses.Response;
import com.openai.models.responses.ResponseCreateParams;
import com.openai.models.responses.ResponseStatus;
import com.sap.cloud.sdk.cloudplatform.connectivity.ApacheHttpClient5Accessor;
import com.sap.cloud.sdk.cloudplatform.connectivity.ApacheHttpClient5Cache;
import com.sap.cloud.sdk.cloudplatform.connectivity.DefaultHttpDestination;
+import java.util.concurrent.CompletableFuture;
import javax.annotation.Nonnull;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;
@WireMockTest
@@ -23,13 +29,9 @@ class AiCoreOpenAiClientTest {
@BeforeEach
void setup(@Nonnull final WireMockRuntimeInfo server) {
- // Create destination pointing to WireMock server
final var destination = DefaultHttpDestination.builder(server.getHttpBaseUrl()).build();
+ client = AiCoreOpenAiClient.fromDestination(destination, OpenAiModel.GPT_5);
- // Create OpenAI client using our custom implementation
- client = AiCoreOpenAiClient.fromDestination(destination);
-
- // Disable HTTP client caching for tests to ensure fresh clients
ApacheHttpClient5Accessor.setHttpClientCache(ApacheHttpClient5Cache.DISABLED);
}
@@ -40,18 +42,85 @@ void reset() {
}
@Test
- void testResponseSuccess() {
+ void testResponseServiceSuccessWithMatchingModel() {
final var params =
ResponseCreateParams.builder()
.input("What is the capital of France?")
- .model("gpt-5")
+ .model(ChatModel.GPT_5)
.build();
final Response response = client.responses().create(params);
assertThat(response).isNotNull();
- assertThat(response.id()).isEqualTo("resp_01a38d2783b385be0069bd43d260108193aef990678aa8a0af");
assertThat(response.status().orElseThrow()).isEqualTo(ResponseStatus.COMPLETED);
- assertThat(response.output()).isNotEmpty();
+ }
+
+ @Test
+ void testResponseServiceSuccessWithoutModel() {
+ final var params =
+ ResponseCreateParams.builder().input("What is the capital of France?").build();
+ final Response response = client.responses().create(params);
+
+ assertThat(response).isNotNull();
+ assertThat(response.status().orElseThrow()).isEqualTo(ResponseStatus.COMPLETED);
+ }
+
+ @Test
+ void testResponseServiceFailsWithModelMismatch() {
+ final var params =
+ ResponseCreateParams.builder()
+ .input("What is the capital of France?")
+ .model(ChatModel.GPT_4) // Different from client's expected model "gpt-5"
+ .build();
+
+ assertThatThrownBy(() -> client.responses().create(params))
+ .isInstanceOf(IllegalArgumentException.class)
+ .hasMessageContaining("Model mismatch")
+ .hasMessageContaining("gpt-5")
+ .hasMessageContaining("gpt-4");
+ }
+
+ @Test
+ void testResponseServiceWithOptions() {
+ final var service = client.responses();
+
+ final var modifiedService =
+ service.withOptions(
+ builder -> {
+              // Verify withOptions() preserves the AiCore wrapper type while applying changes
+ builder.putHeader("X-Custom-Header", "test-value");
+ });
+
+ assertThat(modifiedService).isInstanceOf(AiCoreResponseService.class);
+ }
+
+ @Test
+ void testChatCompletionServiceSuccessWithMatchingModel() {
+ final var params =
+ ChatCompletionCreateParams.builder()
+ .model(ChatModel.GPT_5)
+ .addUserMessage("Say this is a test")
+ .build();
+
+ final ChatCompletion response = client.chat().completions().create(params);
+ assertThat(response).isNotNull();
+ }
+
+ @Test
+  @Disabled("Fails: the async client path is not wrapped by AiCoreOpenAiClient yet; enable once async wrappers exist.")
+ void testAsyncChatCompletion() {
+ final var params =
+ ChatCompletionCreateParams.builder()
+ .model(ChatModel.GPT_5)
+ .addUserMessage("Say this is a test")
+ .build();
+
+    final CompletableFuture<ChatCompletion> future =
+ client.async().chat().completions().create(params);
+
+ final ChatCompletion response = future.join();
+
+ assertThat(response).isNotNull();
+ assertThat(response.choices()).isNotEmpty();
}
}
diff --git a/foundation-models/openai/src/test/resources/mappings/chatCompletion.json b/foundation-models/openai/src/test/resources/mappings/chatCompletion.json
new file mode 100644
index 000000000..bb5358a84
--- /dev/null
+++ b/foundation-models/openai/src/test/resources/mappings/chatCompletion.json
@@ -0,0 +1,104 @@
+{
+ "request": {
+ "method": "POST",
+ "urlPattern": "/chat/completions\\?api-version=2024-02-01",
+ "bodyPatterns": [
+ {
+ "equalToJson": {
+ "messages": [
+ {
+ "content": "Say this is a test",
+ "role": "user"
+ }
+ ],
+ "model": "gpt-5"
+ }
+ }
+ ]
+ },
+ "response": {
+ "status": 200,
+ "headers": {
+ "Content-Type": "application/json",
+ "x-request-id": "f181d24e-f41e-9396-a195-6d1334bfe952",
+ "ai-inference-id": "f181d24e-f41e-9396-a195-6d1334bfe952",
+ "x-upstream-service-time": "3177"
+ },
+ "jsonBody": {
+ "choices": [
+ {
+ "content_filter_results": {
+ "hate": {
+ "filtered": false,
+ "severity": "safe"
+ },
+ "self_harm": {
+ "filtered": false,
+ "severity": "safe"
+ },
+ "sexual": {
+ "filtered": false,
+ "severity": "safe"
+ },
+ "violence": {
+ "filtered": false,
+ "severity": "safe"
+ }
+ },
+ "finish_reason": "stop",
+ "index": 0,
+ "logprobs": null,
+ "message": {
+ "annotations": [],
+ "content": "This is a test.",
+ "refusal": null,
+ "role": "assistant"
+ }
+ }
+ ],
+ "created": 1775053782,
+ "id": "chatcmpl-DPqreavBOHfKfV0orguq4jK5Gbmmh",
+ "model": "gpt-5-2025-08-07",
+ "object": "chat.completion",
+ "prompt_filter_results": [
+ {
+ "content_filter_results": {
+ "hate": {
+ "filtered": false,
+ "severity": "safe"
+ },
+ "self_harm": {
+ "filtered": false,
+ "severity": "safe"
+ },
+ "sexual": {
+ "filtered": false,
+ "severity": "safe"
+ },
+ "violence": {
+ "filtered": false,
+ "severity": "safe"
+ }
+ },
+ "prompt_index": 0
+ }
+ ],
+ "system_fingerprint": null,
+ "usage": {
+ "completion_tokens": 271,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 256,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens": 11,
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ },
+ "total_tokens": 282
+ }
+ }
+ }
+}
\ No newline at end of file