This is an automated email from the ASF dual-hosted git repository.
young pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/apisix.git
The following commit(s) were added to refs/heads/master by this push:
new 80c587e42 feat: support anthropic openai api (#12881)
80c587e42 is described below
commit 80c587e42f60a9eacd22c100cb9b34040412084f
Author: YYYoung <[email protected]>
AuthorDate: Wed Jan 14 09:08:51 2026 +0800
feat: support anthropic openai api (#12881)
---
apisix/plugins/ai-drivers/anthropic.lua | 24 +++
apisix/plugins/ai-drivers/schema.lua | 1 +
docs/en/latest/plugins/ai-proxy-multi.md | 6 +-
docs/en/latest/plugins/ai-proxy.md | 6 +-
docs/en/latest/plugins/ai-request-rewrite.md | 2 +-
docs/zh/latest/plugins/ai-proxy-multi.md | 6 +-
docs/zh/latest/plugins/ai-proxy.md | 6 +-
docs/zh/latest/plugins/ai-request-rewrite.md | 2 +-
t/plugin/ai-proxy-anthropic.t | 298 +++++++++++++++++++++++++++
9 files changed, 337 insertions(+), 14 deletions(-)
diff --git a/apisix/plugins/ai-drivers/anthropic.lua b/apisix/plugins/ai-drivers/anthropic.lua
new file mode 100644
index 000000000..492e0f442
--- /dev/null
+++ b/apisix/plugins/ai-drivers/anthropic.lua
@@ -0,0 +1,24 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+return require("apisix.plugins.ai-drivers.openai-base").new(
+ {
+ host = "api.anthropic.com",
+ path = "/v1/chat/completions",
+ port = 443
+ }
+)
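The new driver reuses the shared OpenAI-compatible base and only overrides the
connection defaults. A minimal ai-proxy plugin configuration selecting it could
look like the following Lua-table sketch (the model name and token are
placeholders, not taken from this commit):

    -- A minimal sketch, assuming the plugin config shape documented below;
    -- <ANTHROPIC_API_KEY> and the model name are placeholders.
    local plugin_conf = {
        provider = "anthropic",  -- resolves to apisix/plugins/ai-drivers/anthropic.lua
        auth = {
            header = { Authorization = "Bearer <ANTHROPIC_API_KEY>" },
        },
        options = {
            model = "claude-sonnet-4-5",
            max_tokens = 512,
        },
        -- with no override.endpoint, the driver defaults above apply:
        -- https://api.anthropic.com:443/v1/chat/completions
    }
    return plugin_conf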
diff --git a/apisix/plugins/ai-drivers/schema.lua b/apisix/plugins/ai-drivers/schema.lua
index 16fee444b..199bd3d1a 100644
--- a/apisix/plugins/ai-drivers/schema.lua
+++ b/apisix/plugins/ai-drivers/schema.lua
@@ -45,6 +45,7 @@ local openai_compatible_list = {
"openai",
"deepseek",
"aimlapi",
+ "anthropic",
"openai-compatible",
"azure-openai",
"openrouter",
diff --git a/docs/en/latest/plugins/ai-proxy-multi.md b/docs/en/latest/plugins/ai-proxy-multi.md
index 83ffced33..164ecdbbc 100644
--- a/docs/en/latest/plugins/ai-proxy-multi.md
+++ b/docs/en/latest/plugins/ai-proxy-multi.md
@@ -7,7 +7,7 @@ keywords:
- ai-proxy-multi
- AI
- LLM
-description: The ai-proxy-multi Plugin extends the capabilities of ai-proxy
with load balancing, retries, fallbacks, and health checks, simplifying the
integration with OpenAI, DeepSeek, Azure, AIMLAPI, OpenRouter and other
OpenAI-compatible APIs.
+description: The ai-proxy-multi Plugin extends the capabilities of ai-proxy
with load balancing, retries, fallbacks, and health checks, simplifying the
integration with OpenAI, DeepSeek, Azure, AIMLAPI, Anthropic, OpenRouter, and
other OpenAI-compatible APIs.
---
<!--
@@ -35,7 +35,7 @@ description: The ai-proxy-multi Plugin extends the
capabilities of ai-proxy with
## Description
-The `ai-proxy-multi` Plugin simplifies access to LLM and embedding models by
transforming Plugin configurations into the designated request format for
OpenAI, DeepSeek, Azure, AIMLAPI, OpenRouter, and other OpenAI-compatible APIs.
It extends the capabilities of [`ai-proxy`](./ai-proxy.md) with load balancing,
retries, fallbacks, and health checks.
+The `ai-proxy-multi` Plugin simplifies access to LLM and embedding models by
transforming Plugin configurations into the designated request format for
OpenAI, DeepSeek, Azure, AIMLAPI, Anthropic, OpenRouter, and other
OpenAI-compatible APIs. It extends the capabilities of
[`ai-proxy`](./ai-proxy.md) with load balancing, retries, fallbacks, and health
checks.
In addition, the Plugin also supports logging LLM request information in the
access log, such as token usage, model, time to the first response, and more.
@@ -58,7 +58,7 @@ In addition, the Plugin also supports logging LLM request
information in the acc
| balancer.key | string | False |
| | Used when `type` is `chash`. When
`hash_on` is set to `header` or `cookie`, `key` is required. When `hash_on` is
set to `consumer`, `key` is not required as the consumer name will be used as
the key automatically. |
| instances | array[object] | True |
| | LLM instance configurations. |
| instances.name | string | True |
| | Name of the LLM service instance. |
-| instances.provider | string | True |
| [openai, deepseek, azure-openai, aimlapi, openrouter,
openai-compatible] | LLM service provider. When set to `openai`, the Plugin
will proxy the request to `api.openai.com`. When set to `deepseek`, the Plugin
will proxy the request to `api.deepseek.com`. When set to `aimlapi`, the Plugin
uses the OpenAI-compatible driver and proxies the request to `api.aimlapi.com`
by default. When set [...]
+| instances.provider | string | True |
| [openai, deepseek, azure-openai, aimlapi, anthropic,
openrouter, openai-compatible] | LLM service provider. When set to `openai`,
the Plugin will proxy the request to `api.openai.com`. When set to `deepseek`,
the Plugin will proxy the request to `api.deepseek.com`. When set to `aimlapi`,
the Plugin uses the OpenAI-compatible driver and proxies the request to
`api.aimlapi.com` by default [...]
| instances.priority | integer | False | 0
| | Priority of the LLM instance in load
balancing. `priority` takes precedence over `weight`. |
| instances.weight | string | True | 0
| greater than or equal to 0 | Weight of the LLM instance in
load balancing. |
| instances.auth | object | True |
| | Authentication configurations. |
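To make the instance fields above concrete, a Lua-table sketch of an
`instances` array (names, keys, and models are placeholders, not from this
commit) in which an Anthropic instance is preferred and an OpenAI instance
serves as fallback:

    -- A minimal sketch, assuming the instance fields documented above.
    local instances = {
        {
            name = "anthropic-primary",
            provider = "anthropic",
            priority = 10,  -- priority takes precedence over weight
            weight = 1,
            auth = { header = { Authorization = "Bearer <ANTHROPIC_API_KEY>" } },
            options = { model = "claude-sonnet-4-5" },
        },
        {
            name = "openai-fallback",
            provider = "openai",
            priority = 0,  -- used when the higher-priority instance is unavailable
            weight = 1,
            auth = { header = { Authorization = "Bearer <OPENAI_API_KEY>" } },
            options = { model = "gpt-4o" },
        },
    }
    return instances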
diff --git a/docs/en/latest/plugins/ai-proxy.md b/docs/en/latest/plugins/ai-proxy.md
index af64696a4..0ecbeac70 100644
--- a/docs/en/latest/plugins/ai-proxy.md
+++ b/docs/en/latest/plugins/ai-proxy.md
@@ -7,7 +7,7 @@ keywords:
- ai-proxy
- AI
- LLM
-description: The ai-proxy Plugin simplifies access to LLM and embedding model
providers by converting Plugin configurations into the required request format
for OpenAI, DeepSeek, Azure, AIMLAPI, OpenRouter, and other OpenAI-compatible
APIs.
+description: The ai-proxy Plugin simplifies access to LLM and embedding model
providers by converting Plugin configurations into the required request format
for OpenAI, DeepSeek, Azure, AIMLAPI, Anthropic, OpenRouter, and other
OpenAI-compatible APIs.
---
<!--
@@ -35,7 +35,7 @@ description: The ai-proxy Plugin simplifies access to LLM and
embedding models p
## Description
-The `ai-proxy` Plugin simplifies access to LLM and embedding models by
transforming Plugin configurations into the designated request format. It
supports integration with OpenAI, DeepSeek, Azure, AIMLAPI, OpenRouter, and
other OpenAI-compatible APIs.
+The `ai-proxy` Plugin simplifies access to LLM and embedding models by
transforming Plugin configurations into the designated request format. It
supports integration with OpenAI, DeepSeek, Azure, AIMLAPI, Anthropic,
OpenRouter, and other OpenAI-compatible APIs.
In addition, the Plugin also supports logging LLM request information in the
access log, such as token usage, model, time to the first response, and more.
@@ -51,7 +51,7 @@ In addition, the Plugin also supports logging LLM request
information in the acc
| Name | Type | Required | Default | Valid values
| Description |
|--------------------|--------|----------|---------|------------------------------------------|-------------|
-| provider | string | True | | [openai, deepseek,
azure-openai, aimlapi, openrouter, openai-compatible] | LLM service provider.
When set to `openai`, the Plugin will proxy the request to
`https://api.openai.com/chat/completions`. When set to `deepseek`, the Plugin
will proxy the request to `https://api.deepseek.com/chat/completions`. When set
to `aimlapi`, the Plugin uses the OpenAI-compatible driver and proxies the
request to `https://api.aimlapi.com/v1/chat/comple [...]
+| provider | string | True | | [openai, deepseek,
azure-openai, aimlapi, anthropic, openrouter, openai-compatible] | LLM service
provider. When set to `openai`, the Plugin will proxy the request to
`https://api.openai.com/chat/completions`. When set to `deepseek`, the Plugin
will proxy the request to `https://api.deepseek.com/chat/completions`. When set
to `aimlapi`, the Plugin uses the OpenAI-compatible driver and proxies the
request to `https://api.aimlapi.com/v1/ [...]
| auth | object | True | |
| Authentication configurations. |
| auth.header | object | False | |
| Authentication headers. At least one of `header` or `query`
must be configured. |
| auth.query | object | False | |
| Authentication query parameters. At least one of `header` or
`query` must be configured. |
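Since at least one of `auth.header` and `auth.query` must be configured,
either style works. A hedged Lua-table sketch of both (the `apikey` query
parameter name mirrors the test file later in this commit; the key itself is a
placeholder):

    -- Header-based authentication:
    local conf_header = {
        provider = "anthropic",
        auth = { header = { Authorization = "Bearer <API_KEY>" } },
        options = { model = "claude-sonnet-4-5" },
    }

    -- Query-based authentication:
    local conf_query = {
        provider = "anthropic",
        auth = { query = { apikey = "<API_KEY>" } },
        options = { model = "claude-sonnet-4-5" },
    }

    return { conf_header, conf_query }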
diff --git a/docs/en/latest/plugins/ai-request-rewrite.md b/docs/en/latest/plugins/ai-request-rewrite.md
index 7b9a3212d..7b503c6be 100644
--- a/docs/en/latest/plugins/ai-request-rewrite.md
+++ b/docs/en/latest/plugins/ai-request-rewrite.md
@@ -36,7 +36,7 @@ The `ai-request-rewrite` plugin intercepts client requests
before they are forwa
| **Field** | **Required** | **Type** | **Description**
|
| ------------------------- | ------------ | -------- |
------------------------------------------------------------------------------------
|
| prompt | Yes | String | The prompt sent to the LLM
service. |
-| provider | Yes | String | Name of the LLM
service. Available options: openai, deepseek, azure-openai, aimlapi, openrouter
and openai-compatible. When `aimlapi` is selected, the plugin uses the
OpenAI-compatible driver with a default endpoint of
`https://api.aimlapi.com/v1/chat/completions`. |
+| provider | Yes | String | Name of the LLM
service. Available options: openai, deepseek, azure-openai, aimlapi, anthropic,
openrouter, and openai-compatible. When `aimlapi` is selected, the plugin uses
the OpenAI-compatible driver with a default endpoint of
`https://api.aimlapi.com/v1/chat/completions`. |
| auth | Yes | Object | Authentication
configuration |
| auth.header | No | Object | Authentication
headers. Key must match pattern `^[a-zA-Z0-9._-]+$`. |
| auth.query | No | Object | Authentication query
parameters. Key must match pattern `^[a-zA-Z0-9._-]+$`. |
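A hedged Lua-table sketch of an ai-request-rewrite configuration using the new
provider (the prompt and key are placeholders, not from this commit):

    local conf = {
        prompt = "Rewrite the request body into formal English",
        provider = "anthropic",
        auth = {
            header = { Authorization = "Bearer <API_KEY>" },
        },
    }
    return conf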
diff --git a/docs/zh/latest/plugins/ai-proxy-multi.md b/docs/zh/latest/plugins/ai-proxy-multi.md
index f65c90b15..e17b43096 100644
--- a/docs/zh/latest/plugins/ai-proxy-multi.md
+++ b/docs/zh/latest/plugins/ai-proxy-multi.md
@@ -7,7 +7,7 @@ keywords:
- ai-proxy-multi
- AI
- LLM
-description: ai-proxy-multi 插件通过负载均衡、重试、故障转移和健康检查扩展了 ai-proxy 的功能,简化了与
OpenAI、DeepSeek、Azure、AIMLAPI、OpenRouter 和其他 OpenAI 兼容 API 的集成。
+description: ai-proxy-multi 插件通过负载均衡、重试、故障转移和健康检查扩展了 ai-proxy 的功能,简化了与
OpenAI、DeepSeek、Azure、AIMLAPI、Anthropic、OpenRouter 和其他 OpenAI 兼容 API 的集成。
---
<!--
@@ -35,7 +35,7 @@ description: ai-proxy-multi 插件通过负载均衡、重试、故障转移和
## 描述
-`ai-proxy-multi` 插件通过将插件配置转换为 OpenAI、DeepSeek、Azure、AIMLAPI、OpenRouter 和其他
OpenAI 兼容 API 的指定请求格式,简化了对 LLM 和嵌入模型的访问。它通过负载均衡、重试、故障转移和健康检查扩展了
[`ai-proxy`](./ai-proxy.md) 的功能。
+`ai-proxy-multi` 插件通过将插件配置转换为
OpenAI、DeepSeek、Azure、AIMLAPI、Anthropic、OpenRouter 和其他 OpenAI 兼容 API
的指定请求格式,简化了对 LLM 和嵌入模型的访问。它通过负载均衡、重试、故障转移和健康检查扩展了 [`ai-proxy`](./ai-proxy.md)
的功能。
此外,该插件还支持在访问日志中记录 LLM 请求信息,如令牌使用量、模型、首次响应时间等。
@@ -58,7 +58,7 @@ description: ai-proxy-multi 插件通过负载均衡、重试、故障转移和
| balancer.key | string | 否 |
| | 当 `type` 为 `chash` 时使用。当 `hash_on` 设置为
`header` 或 `cookie` 时,需要 `key`。当 `hash_on` 设置为 `consumer` 时,不需要
`key`,因为消费者名称将自动用作键。 |
| instances | array[object] | 是 |
| | LLM 实例配置。 |
| instances.name | string | 是 |
| | LLM 服务实例的名称。 |
-| instances.provider | string | 是 |
| [openai, deepseek, azure-openai, aimlapi, openrouter,
openai-compatible] | LLM 服务提供商。设置为 `openai` 时,插件将代理请求到 `api.openai.com`。设置为
`deepseek` 时,插件将代理请求到 `api.deepseek.com`。设置为 `aimlapi` 时,插件使用 OpenAI
兼容驱动程序,默认将请求代理到 `api.aimlapi.com`。设置为 `openrouter` 时,插件使用 OpenAI
兼容驱动程序,默认将请求代理到 `openrouter.ai`。设置为 `openai-compatible` 时,插件将代理请求到在 `override`
中配置的自定义端点。 |
+| instances.provider | string | 是 |
| [openai, deepseek, azure-openai, aimlapi, anthropic,
openrouter, openai-compatible] | LLM 服务提供商。设置为 `openai` 时,插件将代理请求到
`api.openai.com`。设置为 `deepseek` 时,插件将代理请求到 `api.deepseek.com`。设置为 `aimlapi`
时,插件使用 OpenAI 兼容驱动程序,默认将请求代理到 `api.aimlapi.com`。设置为 `anthropic` 时,插件使用 OpenAI
兼容驱动程序,默认将请求代理到 `api.anthropic.com`。设置为 `openrouter` 时,插件使用 OpenAI
兼容驱动程序,默认将请求代理到 `openrouter.ai`。设置为 `openai-compati [...]
| instances.priority | integer | 否 | 0
| | LLM 实例在负载均衡中的优先级。`priority` 优先于 `weight`。 |
| instances.weight | string | 是 | 0
| 大于或等于 0 | LLM 实例在负载均衡中的权重。 |
| instances.auth | object | 是 |
| | 身份验证配置。 |
diff --git a/docs/zh/latest/plugins/ai-proxy.md b/docs/zh/latest/plugins/ai-proxy.md
index 823a8556a..aef4b1c8c 100644
--- a/docs/zh/latest/plugins/ai-proxy.md
+++ b/docs/zh/latest/plugins/ai-proxy.md
@@ -7,7 +7,7 @@ keywords:
- ai-proxy
- AI
- LLM
-description: ai-proxy 插件通过将插件配置转换为所需的请求格式,简化了对 LLM 和嵌入模型提供商的访问,支持
OpenAI、DeepSeek、Azure、AIMLAPI、OpenRouter 和其他 OpenAI 兼容的 API。
+description: ai-proxy 插件通过将插件配置转换为所需的请求格式,简化了对 LLM 和嵌入模型提供商的访问,支持
OpenAI、DeepSeek、Azure、AIMLAPI、Anthropic、OpenRouter 和其他 OpenAI 兼容的 API。
---
<!--
@@ -35,7 +35,7 @@ description: ai-proxy 插件通过将插件配置转换为所需的请求格式
## 描述
-`ai-proxy` 插件通过将插件配置转换为指定的请求格式,简化了对 LLM 和嵌入模型的访问。它支持与
OpenAI、DeepSeek、Azure、AIMLAPI、OpenRouter 和其他 OpenAI 兼容的 API 集成。
+`ai-proxy` 插件通过将插件配置转换为指定的请求格式,简化了对 LLM 和嵌入模型的访问。它支持与
OpenAI、DeepSeek、Azure、AIMLAPI、Anthropic、OpenRouter 和其他 OpenAI 兼容的 API 集成。
此外,该插件还支持在访问日志中记录 LLM 请求信息,如令牌使用量、模型、首次响应时间等。
@@ -51,7 +51,7 @@ description: ai-proxy 插件通过将插件配置转换为所需的请求格式
| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述
|
|--------------------|--------|----------|---------|------------------------------------------|-------------|
-| provider | string | 是 | | [openai, deepseek,
azure-openai, aimlapi, openrouter, openai-compatible] | LLM 服务提供商。当设置为 `openai`
时,插件将代理请求到 `https://api.openai.com/chat/completions`。当设置为 `deepseek` 时,插件将代理请求到
`https://api.deepseek.com/chat/completions`。当设置为 `aimlapi` 时,插件使用 OpenAI
兼容驱动程序,默认将请求代理到 `https://api.aimlapi.com/v1/chat/completions`。当设置为 `openrouter`
时,插件使用 OpenAI 兼容驱动程序,默认将请求代理到
`https://openrouter.ai/api/v1/chat/completions`。当设置为 `openai-compatible`
时,插件将代理 [...]
+| provider | string | 是 | | [openai, deepseek,
azure-openai, aimlapi, anthropic, openrouter, openai-compatible] | LLM
服务提供商。当设置为 `openai` 时,插件将代理请求到 `https://api.openai.com/chat/completions`。当设置为
`deepseek` 时,插件将代理请求到 `https://api.deepseek.com/chat/completions`。当设置为
`aimlapi` 时,插件使用 OpenAI 兼容驱动程序,默认将请求代理到
`https://api.aimlapi.com/v1/chat/completions`。当设置为 `anthropic` 时,插件将代理请求到
`https://api.anthropic.com/v1/chat/completions`。当设置为 `openrouter` 时,插件使用 OpenAI
兼容驱动程序,默认 [...]
| auth | object | 是 | |
| 身份验证配置。 |
| auth.header | object | 否 | |
| 身份验证标头。必须配置 `header` 或 `query` 中的至少一个。 |
| auth.query | object | 否 | |
| 身份验证查询参数。必须配置 `header` 或 `query` 中的至少一个。 |
diff --git a/docs/zh/latest/plugins/ai-request-rewrite.md b/docs/zh/latest/plugins/ai-request-rewrite.md
index 83a63b4c7..8b1915af8 100644
--- a/docs/zh/latest/plugins/ai-request-rewrite.md
+++ b/docs/zh/latest/plugins/ai-request-rewrite.md
@@ -36,7 +36,7 @@ description: ai-request-rewrite 插件在客户端请求转发到上游服务之
| **字段** | **必选项** | **类型** | **描述**
|
| ------------------------- | ------------ | -------- |
------------------------------------------------------------------------------------
|
| prompt | 是 | String | 发送到 LLM 服务的提示。
|
-| provider | 是 | String | LLM
服务的名称。可用选项:openai、deepseek、azure-openai、aimlapi、openrouter 和
openai-compatible。当选择 `aimlapi` 时,插件使用 OpenAI 兼容驱动程序,默认端点为
`https://api.aimlapi.com/v1/chat/completions`。 |
+| provider | 是 | String | LLM
服务的名称。可用选项:openai、deepseek、azure-openai、aimlapi、anthropic、openrouter 和
openai-compatible。当选择 `aimlapi` 时,插件使用 OpenAI 兼容驱动程序,默认端点为
`https://api.aimlapi.com/v1/chat/completions`。 |
| auth | 是 | Object | 身份验证配置
|
| auth.header | 否 | Object | 身份验证头部。键必须匹配模式
`^[a-zA-Z0-9._-]+$`。 |
| auth.query | 否 | Object | 身份验证查询参数。键必须匹配模式
`^[a-zA-Z0-9._-]+$`。 |
diff --git a/t/plugin/ai-proxy-anthropic.t b/t/plugin/ai-proxy-anthropic.t
new file mode 100644
index 000000000..c7e7c2a35
--- /dev/null
+++ b/t/plugin/ai-proxy-anthropic.t
@@ -0,0 +1,298 @@
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use t::APISIX 'no_plan';
+
+log_level("info");
+repeat_each(1);
+no_long_string();
+no_root_location();
+
+
+my $resp_file = 't/assets/openai-compatible-api-response.json';
+open(my $fh, '<', $resp_file) or die "Could not open file '$resp_file' $!";
+my $resp = do { local $/; <$fh> };
+close($fh);
+
+print "Hello, World!\n";
+print $resp;
+
+
+add_block_preprocessor(sub {
+ my ($block) = @_;
+
+ if (!defined $block->request) {
+ $block->set_value("request", "GET /t");
+ }
+
+ my $user_yaml_config = <<_EOC_;
+plugins:
+ - ai-proxy-multi
+ - prometheus
+_EOC_
+ $block->set_value("extra_yaml_config", $user_yaml_config);
+
+ my $http_config = $block->http_config // <<_EOC_;
+ server {
+ server_name anthropic;
+ listen 6725;
+
+ default_type 'application/json';
+
+ location /v1/chat/completions {
+ content_by_lua_block {
+ local json = require("cjson.safe")
+
+ if ngx.req.get_method() ~= "POST" then
+ ngx.status = 400
+                        ngx.say("Unsupported request method: ", ngx.req.get_method())
+ end
+ ngx.req.read_body()
+ local body, err = ngx.req.get_body_data()
+ body, err = json.decode(body)
+
+ local test_type = ngx.req.get_headers()["test-type"]
+ if test_type == "options" then
+ if body.foo == "bar" then
+ ngx.status = 200
+ ngx.say("options works")
+ else
+ ngx.status = 500
+ ngx.say("model options feature doesn't work")
+ end
+ return
+ end
+
+ local header_auth = ngx.req.get_headers()["authorization"]
+ local query_auth = ngx.req.get_uri_args()["apikey"]
+
+                    if header_auth ~= "Bearer token" and query_auth ~= "apikey" then
+ ngx.status = 401
+ ngx.say("Unauthorized")
+ return
+ end
+
+                    if header_auth == "Bearer token" or query_auth == "apikey" then
+ ngx.req.read_body()
+ local body, err = ngx.req.get_body_data()
+ body, err = json.decode(body)
+
+ if not body.messages or #body.messages < 1 then
+ ngx.status = 400
+ ngx.say([[{ "error": "bad request"}]])
+ return
+ end
+                        if body.messages[1].content == "write an SQL query to get all rows from student table" then
+ ngx.print("SELECT * FROM STUDENTS")
+ return
+ end
+
+ ngx.status = 200
+ ngx.say([[$resp]])
+ return
+ end
+
+
+ ngx.status = 503
+ ngx.say("reached the end of the test suite")
+ }
+ }
+
+ location /random {
+ content_by_lua_block {
+ ngx.say("path override works")
+ }
+ }
+ }
+_EOC_
+
+ $block->set_value("http_config", $http_config);
+});
+
+run_tests();
+
+__DATA__
+
+=== TEST 1: set route with right auth header
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/routes/1',
+ ngx.HTTP_PUT,
+ [[{
+ "uri": "/anything",
+ "plugins": {
+ "ai-proxy-multi": {
+ "instances": [
+ {
+ "name": "anthropic",
+ "provider": "anthropic",
+ "weight": 1,
+ "auth": {
+ "header": {
+ "Authorization": "Bearer token"
+ }
+ },
+ "options": {
+ "model": "claude-sonnet-4-5",
+ "max_tokens": 512,
+ "temperature": 1.0
+ },
+ "override": {
+                                        "endpoint": "http://localhost:6725/v1/chat/completions"
+ }
+ }
+ ],
+ "ssl_verify": false
+ }
+ }
+ }]]
+ )
+
+ if code >= 300 then
+ ngx.status = code
+ end
+ ngx.say(body)
+ }
+ }
+--- response_body
+passed
+
+
+
+=== TEST 2: send request
+--- request
+POST /anything
+{ "messages": [ { "role": "system", "content": "You are a mathematician" }, {
"role": "user", "content": "What is 1+1?"} ] }
+--- more_headers
+Authorization: Bearer token
+--- error_code: 200
+--- response_body eval
+qr/\{ "content": "1 \+ 1 = 2\.", "role": "assistant" \}/
+
+
+
+=== TEST 3: set route with stream = true (SSE)
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/routes/1',
+ ngx.HTTP_PUT,
+ [[{
+ "uri": "/anything",
+ "plugins": {
+ "ai-proxy-multi": {
+ "instances": [
+ {
+ "name": "anthropic",
+ "provider": "anthropic",
+ "weight": 1,
+ "auth": {
+ "header": {
+ "Authorization": "Bearer token"
+ }
+ },
+ "options": {
+ "model": "claude-sonnet-4-5",
+ "max_tokens": 512,
+ "temperature": 1.0,
+ "stream": true
+ },
+ "override": {
+                                        "endpoint": "http://localhost:7737/v1/chat/completions"
+ }
+ }
+ ],
+ "ssl_verify": false
+ }
+ }
+ }]]
+ )
+
+ if code >= 300 then
+ ngx.status = code
+ end
+ ngx.say(body)
+ }
+ }
+--- response_body
+passed
+
+
+
+=== TEST 4: test if SSE works as expected
+--- config
+ location /t {
+ content_by_lua_block {
+ local http = require("resty.http")
+ local httpc = http.new()
+ local core = require("apisix.core")
+
+ local ok, err = httpc:connect({
+ scheme = "http",
+ host = "localhost",
+ port = ngx.var.server_port,
+ })
+
+ if not ok then
+ ngx.status = 500
+ ngx.say(err)
+ return
+ end
+
+ local params = {
+ method = "POST",
+ headers = {
+ ["Content-Type"] = "application/json",
+ },
+ path = "/anything",
+ body = [[{
+ "messages": [
+ { "role": "system", "content": "some content" }
+ ],
+ "stream": true
+ }]],
+ }
+
+ local res, err = httpc:request(params)
+ if not res then
+ ngx.status = 500
+ ngx.say(err)
+ return
+ end
+
+ local final_res = {}
+ while true do
+                local chunk, err = res.body_reader() -- will read chunk by chunk
+ if err then
+ core.log.error("failed to read response chunk: ", err)
+ break
+ end
+ if not chunk then
+ break
+ end
+ core.table.insert_tail(final_res, chunk)
+ end
+
+ ngx.print(#final_res .. final_res[6])
+ }
+ }
+--- response_body_like eval
+qr/6data: \[DONE\]\n\n/