This is an automated email from the ASF dual-hosted git repository.

baoyuan pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/apisix.git


The following commit(s) were added to refs/heads/master by this push:
     new ff8c350a3 feat: Add AIMLAPI provider support to AI plugins (#12379)
ff8c350a3 is described below

commit ff8c350a37ef7f1bdfbb330de3fc2fe771d1eb5a
Author: Dmitry <tumas...@yandex.ru>
AuthorDate: Wed Jul 2 09:16:08 2025 +0200

    feat: Add AIMLAPI provider support to AI plugins (#12379)
---
 apisix/plugins/ai-drivers/aimlapi.lua        | 24 +++++++++++++++++++++++
 apisix/plugins/ai-proxy/schema.lua           | 15 +++++++++++---
 apisix/plugins/ai-request-rewrite.lua        |  7 ++++++-
 docs/en/latest/plugins/ai-proxy-multi.md     | 10 +++++-----
 docs/en/latest/plugins/ai-proxy.md           |  6 +++---
 docs/en/latest/plugins/ai-request-rewrite.md |  4 ++--
 t/plugin/ai-request-rewrite.t                | 29 +++++++++++++++++++++++++++-
 7 files changed, 80 insertions(+), 15 deletions(-)

diff --git a/apisix/plugins/ai-drivers/aimlapi.lua b/apisix/plugins/ai-drivers/aimlapi.lua
new file mode 100644
index 000000000..dad101492
--- /dev/null
+++ b/apisix/plugins/ai-drivers/aimlapi.lua
@@ -0,0 +1,24 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+return require("apisix.plugins.ai-drivers.openai-base").new(
+    {
+        host = "api.aimlapi.com",
+        path = "/chat/completions",
+        port = 443
+    }
+)
diff --git a/apisix/plugins/ai-proxy/schema.lua b/apisix/plugins/ai-proxy/schema.lua
index d0e8f23cb..0a3c0280d 100644
--- a/apisix/plugins/ai-proxy/schema.lua
+++ b/apisix/plugins/ai-proxy/schema.lua
@@ -61,8 +61,12 @@ local ai_instance_schema = {
             provider = {
                 type = "string",
                 description = "Type of the AI service instance.",
-                enum = { "openai", "deepseek", "openai-compatible" }, -- add more providers later
-
+                enum = {
+                    "openai",
+                    "deepseek",
+                    "aimlapi",
+                    "openai-compatible",
+                }, -- add more providers later
             },
             priority = {
                 type = "integer",
@@ -96,7 +100,12 @@ _M.ai_proxy_schema = {
         provider = {
             type = "string",
             description = "Type of the AI service instance.",
-            enum = { "openai", "deepseek", "openai-compatible" }, -- add more 
providers later
+            enum = {
+                "openai",
+                "deepseek",
+                "aimlapi",
+                "openai-compatible",
+            }, -- add more providers later
 
         },
         auth = auth_schema,
diff --git a/apisix/plugins/ai-request-rewrite.lua b/apisix/plugins/ai-request-rewrite.lua
index 4027b82e0..1b850eb7f 100644
--- a/apisix/plugins/ai-request-rewrite.lua
+++ b/apisix/plugins/ai-request-rewrite.lua
@@ -65,7 +65,12 @@ local schema = {
         provider = {
             type = "string",
             description = "Name of the AI service provider.",
-            enum = {"openai", "openai-compatible", "deepseek"} -- add more 
providers later
+            enum = {
+                "openai",
+                "openai-compatible",
+                "deepseek",
+                "aimlapi"
+            } -- add more providers later
         },
         auth = auth_schema,
         options = model_options_schema,
diff --git a/docs/en/latest/plugins/ai-proxy-multi.md b/docs/en/latest/plugins/ai-proxy-multi.md
index 6418599d9..368228c70 100644
--- a/docs/en/latest/plugins/ai-proxy-multi.md
+++ b/docs/en/latest/plugins/ai-proxy-multi.md
@@ -7,7 +7,7 @@ keywords:
   - ai-proxy-multi
   - AI
   - LLM
-description: The ai-proxy-multi Plugin extends the capabilities of ai-proxy with load balancing, retries, fallbacks, and health chekcs, simplying the integration with OpenAI, DeepSeek, and other OpenAI-compatible APIs.
+description: The ai-proxy-multi Plugin extends the capabilities of ai-proxy with load balancing, retries, fallbacks, and health checks, simplifying the integration with OpenAI, DeepSeek, AIMLAPI, and other OpenAI-compatible APIs.
 ---
 
 <!--
@@ -35,7 +35,7 @@ description: The ai-proxy-multi Plugin extends the capabilities of ai-proxy with
 
 ## Description
 
-The `ai-proxy-multi` Plugin simplifies access to LLM and embedding models by transforming Plugin configurations into the designated request format for OpenAI, DeepSeek, and other OpenAI-compatible APIs. It extends the capabilities of [`ai-proxy-multi`](./ai-proxy.md) with load balancing, retries, fallbacks, and health checks.
+The `ai-proxy-multi` Plugin simplifies access to LLM and embedding models by transforming Plugin configurations into the designated request format for OpenAI, DeepSeek, AIMLAPI, and other OpenAI-compatible APIs. It extends the capabilities of [`ai-proxy`](./ai-proxy.md) with load balancing, retries, fallbacks, and health checks.
 
 In addition, the Plugin also supports logging LLM request information in the access log, such as token usage, model, time to the first response, and more.
 
@@ -58,20 +58,20 @@ In addition, the Plugin also supports logging LLM request information in the acc
 | balancer.key                       | string         | False    |            |              | Used when `type` is `chash`. When `hash_on` is set to `header` or `cookie`, `key` is required. When `hash_on` is set to `consumer`, `key` is not required as the consumer name will be used as the key automatically. |
 | instances                          | array[object]  | True     |            |              | LLM instance configurations. |
 | instances.name                     | string         | True     |            |              | Name of the LLM service instance. |
-| instances.provider                 | string         | True     |            | [openai, deepseek, openai-compatible] | LLM service provider. When set to `openai`, the Plugin will proxy the request to `api.openai.com`. When set to `deepseek`, the Plugin will proxy the request to `api.deepseek.com`. When set to `openai-compatible`, the Plugin will proxy the request to the custom endpoint configured in `override`. |
+| instances.provider                 | string         | True     |            | [openai, deepseek, aimlapi, openai-compatible] | LLM service provider. When set to `openai`, the Plugin will proxy the request to `api.openai.com`. When set to `deepseek`, the Plugin will proxy the request to `api.deepseek.com`. When set to `aimlapi`, the Plugin uses the OpenAI-compatible driver and proxies the request to `api.aimlapi.com` by default. When set to `openai-compatible`, th [...]
 | instances.priority                  | integer        | False    | 0          |              | Priority of the LLM instance in load balancing. `priority` takes precedence over `weight`. |
 | instances.weight                    | string         | True     | 0          | greater or equal to 0 | Weight of the LLM instance in load balancing. |
 | instances.auth                      | object         | True     |            |              | Authentication configurations. |
 | instances.auth.header               | object         | False    |            |              | Authentication headers. At least one of the `header` and `query` should be configured. |
 | instances.auth.query                | object         | False    |            |              | Authentication query parameters. At least one of the `header` and `query` should be configured. |
-| instances.options                   | object         | False    |            |              | Model configurations. In addition to `model`, you can configure additional parameters and they will be forwarded to the upstream LLM service in the request body. For instance, if you are working with OpenAI or DeepSeek, you can configure additional parameters such as `max_tokens`, `temperature`, `top_p`, and `stream`. See your LLM provider's API documentation for more av [...]
+| instances.options                   | object         | False    |            |              | Model configurations. In addition to `model`, you can configure additional parameters and they will be forwarded to the upstream LLM service in the request body. For instance, if you are working with OpenAI, DeepSeek, or AIMLAPI, you can configure additional parameters such as `max_tokens`, `temperature`, `top_p`, and `stream`. See your LLM provider's API documentation f [...]
 | instances.options.model             | string         | False    |            |              | Name of the LLM model, such as `gpt-4` or `gpt-3.5`. See your LLM provider's API documentation for more available models. |
 | logging                             | object         | False    |            |              | Logging configurations. |
 | logging.summaries                   | boolean        | False    | false      |              | If true, log request LLM model, duration, request, and response tokens. |
 | logging.payloads                    | boolean        | False    | false      |              | If true, log request and response payload. |
 | logging.override                    | object         | False    |            |              | Override setting. |
 | logging.override.endpoint           | string         | False    |            |              | LLM provider endpoint to replace the default endpoint with. If not configured, the Plugin uses the default OpenAI endpoint `https://api.openai.com/v1/chat/completions`. |
-| checks                              | object         | False    |            |              | Health check configurations. Note that at the moment, OpenAI and DeepSeek do not provide an official health check endpoint. Other LLM services that you can configure under `openai-compatible` provider may have available health check endpoints. |
+| checks                              | object         | False    |            |              | Health check configurations. Note that at the moment, OpenAI, DeepSeek, and AIMLAPI do not provide an official health check endpoint. Other LLM services that you can configure under `openai-compatible` provider may have available health check endpoints. |
 | checks.active                       | object         | True     |            |              | Active health check configurations. |
 | checks.active.type                  | string         | False    | http       | [http, https, tcp] | Type of health check connection. |
 | checks.active.timeout               | number         | False    | 1          |              | Health check timeout in seconds. |
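
Taken together, the table above maps onto a plugin configuration like the following sketch; the instance name, weight, token, and model are illustrative placeholders, not values from this commit:

    -- minimal ai-proxy-multi configuration sketch with an AIMLAPI instance
    local conf = {
        instances = {
            {
                name = "aimlapi-primary",   -- arbitrary instance name
                provider = "aimlapi",       -- provider added by this commit
                weight = 1,
                auth = {
                    header = { Authorization = "Bearer <your-aimlapi-key>" }
                },
                options = { model = "gpt-4o" }  -- assumed model; check AIMLAPI's catalog
            }
        }
    }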
diff --git a/docs/en/latest/plugins/ai-proxy.md b/docs/en/latest/plugins/ai-proxy.md
index 239c6df5d..762026642 100644
--- a/docs/en/latest/plugins/ai-proxy.md
+++ b/docs/en/latest/plugins/ai-proxy.md
@@ -7,7 +7,7 @@ keywords:
   - ai-proxy
   - AI
   - LLM
-description: The ai-proxy Plugin simplifies access to LLM and embedding models providers by converting Plugin configurations into the required request format for OpenAI, DeepSeek, and other OpenAI-compatible APIs.
+description: The ai-proxy Plugin simplifies access to LLM and embedding model providers by converting Plugin configurations into the required request format for OpenAI, DeepSeek, AIMLAPI, and other OpenAI-compatible APIs.
 ---
 
 <!--
@@ -35,7 +35,7 @@ description: The ai-proxy Plugin simplifies access to LLM and embedding models p
 
 ## Description
 
-The `ai-proxy` Plugin simplifies access to LLM and embedding models by transforming Plugin configurations into the designated request format. It supports the integration with OpenAI, DeepSeek, and other OpenAI-compatible APIs.
+The `ai-proxy` Plugin simplifies access to LLM and embedding models by transforming Plugin configurations into the designated request format. It supports the integration with OpenAI, DeepSeek, AIMLAPI, and other OpenAI-compatible APIs.
 
 In addition, the Plugin also supports logging LLM request information in the access log, such as token usage, model, time to the first response, and more.
 
@@ -51,7 +51,7 @@ In addition, the Plugin also supports logging LLM request information in the acc
 
 | Name               | Type    | Required | Default | Valid values                           | Description |
 |--------------------|--------|----------|---------|------------------------------------------|-------------|
-| provider          | string  | True     |         | [openai, deepseek, openai-compatible] | LLM service provider. When set to `openai`, the Plugin will proxy the request to `https://api.openai.com/chat/completions`. When set to `deepseek`, the Plugin will proxy the request to `https://api.deepseek.com/chat/completions`. When set to `openai-compatible`, the Plugin will proxy the request to the custom endpoint configured in `override`. |
+| provider          | string  | True     |         | [openai, deepseek, aimlapi, openai-compatible] | LLM service provider. When set to `openai`, the Plugin will proxy the request to `https://api.openai.com/chat/completions`. When set to `deepseek`, the Plugin will proxy the request to `https://api.deepseek.com/chat/completions`. When set to `aimlapi`, the Plugin uses the OpenAI-compatible driver and proxies the request to `https://api.aimlapi.com/v1/chat/completions` by default. When se [...]
 | auth             | object  | True     |         |                                          | Authentication configurations. |
 | auth.header      | object  | False    |         |                                          | Authentication headers. At least one of `header` or `query` must be configured. |
 | auth.query       | object  | False    |         |                                          | Authentication query parameters. At least one of `header` or `query` must be configured. |
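
For the simpler single-instance plugin, the documented fields combine into a configuration like this sketch; the token and model are placeholders:

    -- minimal ai-proxy configuration sketch using the new provider
    local conf = {
        provider = "aimlapi",
        auth = {
            header = { Authorization = "Bearer <your-aimlapi-key>" }
        },
        options = { model = "gpt-4o" }  -- assumed model; check AIMLAPI's catalog
    }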
diff --git a/docs/en/latest/plugins/ai-request-rewrite.md b/docs/en/latest/plugins/ai-request-rewrite.md
index ad1c0735c..584391d2a 100644
--- a/docs/en/latest/plugins/ai-request-rewrite.md
+++ b/docs/en/latest/plugins/ai-request-rewrite.md
@@ -36,12 +36,12 @@ The `ai-request-rewrite` plugin intercepts client requests before they are forwa
 | **Field**                 | **Required** | **Type** | **Description**                                                                      |
 | ------------------------- | ------------ | -------- | ------------------------------------------------------------------------------------ |
 | prompt                    | Yes          | String   | The prompt send to LLM service.                                                      |
-| provider                  | Yes          | String   | Name of the LLM service. Available options: openai, deekseek and openai-compatible   |
+| provider                  | Yes          | String   | Name of the LLM service. Available options: openai, deepseek, aimlapi and openai-compatible. When `aimlapi` is selected, the plugin uses the OpenAI-compatible driver with a default endpoint of `https://api.aimlapi.com/v1/chat/completions`.   |
 | auth                      | Yes          | Object   | Authentication configuration                                                         |
 | auth.header               | No           | Object   | Authentication headers. Key must match pattern `^[a-zA-Z0-9._-]+$`.                  |
 | auth.query                | No           | Object   | Authentication query parameters. Key must match pattern `^[a-zA-Z0-9._-]+$`.         |
 | options                   | No           | Object   | Key/value settings for the model                                                     |
-| options.model             | No           | String   | Model to execute. Examples: "gpt-3.5-turbo" for openai, "deepseek-chat" for deekseek, or "qwen-turbo" for openai-compatible services |
+| options.model             | No           | String   | Model to execute. Examples: "gpt-3.5-turbo" for openai, "deepseek-chat" for deepseek, or "qwen-turbo" for openai-compatible or aimlapi services |
 | override.endpoint         | No           | String   | Override the default endpoint when using OpenAI-compatible services (e.g., self-hosted models or third-party LLM services). When the provider is 'openai-compatible', the endpoint field is required. |
 | timeout                   | No           | Integer  | Total timeout in milliseconds for requests to LLM service, including connect, send, and read timeouts. Range: 1 - 60000. Default: 30000|
 | keepalive                 | No           | Boolean  | Enable keepalive for requests to LLM service. Default: true                                  |
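
Combining the fields above, a complete configuration for the new provider could look like the sketch below; the prompt, token, and model are placeholders. It is essentially the shape that TEST 15 in the test file below validates via check_schema:

    -- ai-request-rewrite configuration sketch using the new provider
    local conf = {
        prompt = "Redact any personal data in the request body.",
        provider = "aimlapi",
        auth = {
            header = { Authorization = "Bearer <your-aimlapi-key>" }
        },
        options = { model = "gpt-4o" },  -- assumed model; check AIMLAPI's catalog
        timeout = 30000,                 -- default total timeout in ms
        keepalive = true
    }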
diff --git a/t/plugin/ai-request-rewrite.t b/t/plugin/ai-request-rewrite.t
index 2cbd59f7b..fc25ac428 100644
--- a/t/plugin/ai-request-rewrite.t
+++ b/t/plugin/ai-request-rewrite.t
@@ -211,7 +211,7 @@ property "provider" is required
 
 
 
-=== TEST 5: provider must be one of: deepseek, openai, openai-compatible
+=== TEST 5: provider must be one of: deepseek, openai, aimlapi, openai-compatible
 --- config
     location /t {
         content_by_lua_block {
@@ -710,3 +710,30 @@ passed
 LLM service returned error status: 500
 --- response_body
 passed
+
+
+
+=== TEST 15: provider aimlapi
+--- config
+    location /t {
+        content_by_lua_block {
+            local plugin = require("apisix.plugins.ai-request-rewrite")
+            local ok, err = plugin.check_schema({
+                prompt = "some prompt",
+                provider = "aimlapi",
+                auth = {
+                    header = {
+                        Authorization = "Bearer token"
+                    }
+                }
+            })
+
+            if not ok then
+                ngx.say(err)
+            else
+                ngx.say("passed")
+            end
+        }
+    }
+--- response_body
+passed
