bzp2010 commented on code in PR #11499:
URL: https://github.com/apache/apisix/pull/11499#discussion_r1726433467


##########
apisix/plugins/ai-proxy/drivers/openai.lua:
##########
@@ -0,0 +1,67 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local _M = {}
+
+local core = require("apisix.core")
+local test_scheme = os.getenv("AI_PROXY_TEST_SCHEME")
+local ngx = ngx
+local pairs = pairs
+
+-- globals
+local DEFAULT_HOST = "api.openai.com"
+local DEFAULT_PORT = 443
+
+local path_mapper = {
+    ["llm/completions"] = "/v1/completions",
+    ["llm/chat"] = "/v1/chat/completions",
+}
+
+
+function _M.configure_request(conf, request_table, ctx)
+    local ip, err = core.resolver.parse_domain(conf.model.options.upstream_host or DEFAULT_HOST)
+    if not ip then
+        core.log.error("failed to resolve ai_proxy upstream host: ", err)
+        return core.response.exit(500)
+    end
+    ctx.custom_upstream_ip = ip
+    ctx.custom_upstream_port = conf.model.options.upstream_port or DEFAULT_PORT
+
+    local ups_path = (conf.model.options and conf.model.options.upstream_path)
+                        or path_mapper[conf.route_type]
+    ngx.var.upstream_uri = ups_path
+    ngx.var.upstream_scheme = test_scheme or "https"
+    ngx.req.set_method(ngx.HTTP_POST)
+    ngx.var.upstream_host = conf.model.options.upstream_host or DEFAULT_HOST
+    ctx.custom_balancer_host = conf.model.options.upstream_host or DEFAULT_HOST
+    ctx.custom_balancer_port = conf.model.options.port or DEFAULT_PORT

Review Comment:
   Should we use the same `upstream.set` API as `traffic-split` to implement this?
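
   For reference, a minimal sketch of that pattern, assuming the `apisix.upstream` module as traffic-split uses it (the `conf` fields and `DEFAULT_HOST`/`DEFAULT_PORT` mirror this PR's driver; the upstream fields and cache-key naming are illustrative):

   ```lua
   local upstream = require("apisix.upstream")

   local function set_ai_upstream(conf, ctx, ip)
       -- build an in-memory upstream pointing at the resolved provider node
       local up_conf = {
           type = "roundrobin",
           scheme = "https",
           pass_host = "rewrite",
           upstream_host = conf.model.options.upstream_host or DEFAULT_HOST,
           nodes = {
               { host = ip, port = conf.model.options.upstream_port or DEFAULT_PORT, weight = 1 },
           },
       }
       local ok, err = upstream.check_schema(up_conf)
       if not ok then
           return nil, err
       end
       local matched_route = ctx.matched_route
       up_conf.parent = matched_route
       -- per-route cache key, the same scheme traffic-split uses
       local upstream_key = up_conf.type .. "#route_" .. matched_route.value.id
       return upstream.set(ctx, upstream_key, ctx.conf_version, up_conf)
   end
   ```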



##########
apisix/plugins/ai-proxy/schema.lua:
##########
@@ -0,0 +1,167 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local _M = {}
+
+local auth_schema = {
+    type = "object",
+    properties = {
+        source = {
+            type = "string",
+            enum = {"header", "param"}
+        },
+        name = {
+            type = "string",
+            description = "Name of the param/header carrying Authorization or 
API key.",
+            minLength = 1,
+        },
+        value = {
+            type = "string",
+            description = "Full auth-header/param value.",
+            minLength = 1,
+             -- TODO encrypted = true,
+        },
+    },
+    required = { "source", "name", "value" },
+    additionalProperties = false,
+}
+
+local model_options_schema = {
+    description = "Key/value settings for the model",
+    type = "object",
+    properties = {
+        max_tokens = {
+            type = "integer",
+            description = "Defines the max_tokens, if using chat or completion 
models.",
+            default = 256
+
+        },
+        input_cost = {
+            type = "number",
+            description = "Defines the cost per 1M tokens in your prompt.",
+            minimum = 0
+
+        },
+        output_cost = {
+            type = "number",
+            description = "Defines the cost per 1M tokens in the output of the 
AI.",
+            minimum = 0
+
+        },
+        temperature = {
+            type = "number",
+            description = "Defines the matching temperature, if using chat or 
completion models.",
+            minimum = 0.0,
+            maximum = 5.0,
+
+        },
+        top_p = {
+            type = "number",
+            description = "Defines the top-p probability mass, if supported.",
+            minimum = 0,
+            maximum = 1,
+
+        },
+        upstream_host = {
+            type = "string",
+            description = "To be specified to override the host of the AI 
provider",
+        },
+        upstream_port = {
+            type = "integer",
+            description = "To be specified to override the AI provider port",
+
+        },
+        upstream_path = {
+            type = "string",
+            description = "To be specified to override the URL to the AI 
provider endpoints",
+        },
+        response_streaming = {
+            description = "Stream response by SSE",
+            type = "boolean",
+            default = false,
+        }
+    }
+}
+
+local model_schema = {
+    type = "object",
+    properties = {
+        provider = {
+            type = "string",
+            description = "AI provider request format - kapisix translates "
+                .. "requests to and from the specified backend compatible 
formats.",
+            oneOf = { "openai" }, -- add more providers later
+
+        },
+        name = {
+            type = "string",
+            description = "Model name to execute.",
+        },
+        options = model_options_schema,
+    },
+    required = {"provider", "name"}
+}
+
+_M.plugin_schema = {
+    type = "object",
+    properties = {
+        route_type = {
+            type = "string",
+            enum = { "llm/chat", "llm/completions", "passthrough" }

Review Comment:
   I don't see the need to support the old and inflexible `llm/completions` API.



##########
apisix/plugins/ai-proxy/drivers/openai.lua:
##########
@@ -0,0 +1,67 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local _M = {}
+
+local core = require("apisix.core")
+local test_scheme = os.getenv("AI_PROXY_TEST_SCHEME")
+local ngx = ngx
+local pairs = pairs
+
+-- globals
+local DEFAULT_HOST = "api.openai.com"
+local DEFAULT_PORT = 443
+
+local path_mapper = {
+    ["llm/completions"] = "/v1/completions",
+    ["llm/chat"] = "/v1/chat/completions",
+}
+
+
+function _M.configure_request(conf, request_table, ctx)
+    local ip, err = core.resolver.parse_domain(conf.model.options.upstream_host or DEFAULT_HOST)
+    if not ip then
+        core.log.error("failed to resolve ai_proxy upstream host: ", err)
+        return core.response.exit(500)
+    end
+    ctx.custom_upstream_ip = ip
+    ctx.custom_upstream_port = conf.model.options.upstream_port or DEFAULT_PORT
+
+    local ups_path = (conf.model.options and conf.model.options.upstream_path)
+                        or path_mapper[conf.route_type]
+    ngx.var.upstream_uri = ups_path
+    ngx.var.upstream_scheme = test_scheme or "https"
+    ngx.req.set_method(ngx.HTTP_POST)
+    ngx.var.upstream_host = conf.model.options.upstream_host or DEFAULT_HOST
+    ctx.custom_balancer_host = conf.model.options.upstream_host or DEFAULT_HOST
+    ctx.custom_balancer_port = conf.model.options.port or DEFAULT_PORT
+    if conf.auth.source == "header" then

Review Comment:
   What does `source` mean here? It doesn't seem that we get a value from a header of the client's request; we only use the configured value, so the naming `source` doesn't make sense to me.
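
   For example, given a config like the following (values are placeholders), `source` only selects where the gateway will attach the credential on the upstream request, not where it reads it from:

   ```lua
   -- illustrative plugin config snippet
   auth = {
       source = "header",          -- where the credential is attached upstream
       name   = "Authorization",   -- header/param name to set
       value  = "Bearer <token>",  -- static value taken from the config
   }
   ```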



##########
apisix/plugins/ai-proxy.lua:
##########
@@ -0,0 +1,107 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local core = require("apisix.core")
+local schema = require("apisix.plugins.ai-proxy.schema")
+local constants = require("apisix.constants")
+local require = require
+
+local ngx_req = ngx.req
+local ngx = ngx
+
+local plugin_name = "ai-proxy"
+local _M = {
+    version = 0.5,
+    priority = 1004,
+    name = plugin_name,
+    schema = schema,
+}
+
+
+function _M.check_schema(conf)
+    local ai_driver = pcall(require, "apisix.plugins.ai-proxy.drivers." .. conf.model.provider)
+    if not ai_driver then
+        return false, "provider: " .. conf.model.provider .. " is not 
supported."
+    end
+    return core.schema.check(schema.plugin_schema, conf)
+end
+
+
+local CONTENT_TYPE_JSON = "application/json"
+
+
+local function get_request_table()
+    local req_body, err = core.request.get_body()
+    if not req_body then
+        return nil, "failed to get request body: " .. (err or "request body is 
empty")
+    end
+    req_body, err = req_body:gsub("\\\"", "\"") -- remove escaping in JSON

Review Comment:
   Please provide a concrete example of the scenario; otherwise, why would we apply such a hack to a string that is already valid JSON?
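
   To illustrate the risk (a hypothetical body, not taken from this PR), the substitution corrupts JSON that legitimately contains escaped quotes:

   ```lua
   local cjson = require("cjson.safe")

   local body = [[{"content": "say \"hi\""}]]  -- valid JSON with escaped quotes
   local stripped = body:gsub("\\\"", "\"")    -- becomes {"content": "say "hi""}

   assert(cjson.decode(body) ~= nil)      -- the original decodes fine
   assert(cjson.decode(stripped) == nil)  -- the stripped body no longer parses
   ```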



##########
apisix/init.lua:
##########
@@ -893,7 +906,16 @@ function _M.http_balancer_phase()
         return core.response.exit(500)
     end
 
-    load_balancer.run(api_ctx.matched_route, api_ctx, common_phase)
+    if api_ctx.custom_upstream_ip then
+        local ok, err = balancer.set_current_peer(api_ctx.custom_upstream_ip,
+                                                  api_ctx.custom_upstream_port)
+        if not ok then
+            core.log.error("failed to overwrite upstream for ai_proxy: ", err)
+            return core.response.exit(500)
+        end
+    else

Review Comment:
   Why not use the `upstream.set` API to modify the upstream endpoints instead of doing this? Is this mandatory?



##########
apisix/plugins/ai-proxy.lua:
##########
@@ -0,0 +1,103 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local core = require("apisix.core")
+local schema = require("apisix.plugins.ai-proxy.schema")
+local constants = require("apisix.constants")
+local require = require
+
+local ngx_req = ngx.req
+local ngx = ngx
+
+local plugin_name = "ai-proxy"
+local _M = {
+    version = 0.5,
+    priority = 1004,
+    name = plugin_name,
+    schema = schema,
+}
+
+
+function _M.check_schema(conf)
+    return core.schema.check(schema.plugin_schema, conf)
+end
+
+
+local CONTENT_TYPE_JSON = "application/json"

Review Comment:
   This can't be checked by comparing against a static string, as the content-type may take the following form:
   
   ```http
   Content-Type: application/json;charset=utf8
   ```
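
   A prefix match would handle that; a sketch using `core.string.has_prefix` from APISIX core:

   ```lua
   local content_type = core.request.header(ctx, "Content-Type") or CONTENT_TYPE_JSON
   -- accept "application/json" with or without parameters such as ";charset=utf8"
   if not core.string.has_prefix(content_type, CONTENT_TYPE_JSON) then
       return 400, "unsupported content-type: " .. content_type
   end
   ```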



##########
apisix/plugins/ai-proxy.lua:
##########
@@ -0,0 +1,107 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local core = require("apisix.core")
+local schema = require("apisix.plugins.ai-proxy.schema")
+local constants = require("apisix.constants")
+local require = require
+
+local ngx_req = ngx.req
+local ngx = ngx
+
+local plugin_name = "ai-proxy"
+local _M = {
+    version = 0.5,
+    priority = 1004,
+    name = plugin_name,
+    schema = schema,
+}
+
+
+function _M.check_schema(conf)
+    local ai_driver = pcall(require, "apisix.plugins.ai-proxy.drivers." .. conf.model.provider)
+    if not ai_driver then
+        return false, "provider: " .. conf.model.provider .. " is not 
supported."
+    end
+    return core.schema.check(schema.plugin_schema, conf)
+end
+
+
+local CONTENT_TYPE_JSON = "application/json"
+
+
+local function get_request_table()
+    local req_body, err = core.request.get_body()
+    if not req_body then
+        return nil, "failed to get request body: " .. (err or "request body is 
empty")
+    end
+    req_body, err = req_body:gsub("\\\"", "\"") -- remove escaping in JSON
+    if not req_body then
+        return nil, "failed to remove escaping from body: " .. req_body .. ". 
err: " .. err
+    end
+    return core.json.decode(req_body)
+end
+
+function _M.access(conf, ctx)
+    local route_type = conf.route_type
+    ctx.ai_proxy = {}
+
+    local content_type = core.request.header(ctx, "Content-Type") or CONTENT_TYPE_JSON
+    if content_type ~= CONTENT_TYPE_JSON then
+        return 400, "unsupported content-type: " .. content_type
+    end
+
+    local request_table, err = get_request_table()
+    if not request_table then
+        return 400, err
+    end
+
+    local req_schema = schema.chat_request_schema
+    if route_type == constants.COMPLETION then
+        req_schema = schema.chat_completion_request_schema
+    end
+    local ok, err = core.schema.check(req_schema, request_table)
+    if not ok then
+        return 400, "request format doesn't match schema: " .. err
+    end
+
+    if conf.model.options and conf.model.options.response_streaming then
+        request_table.stream = true
+        ngx.ctx.disable_proxy_buffering = true
+    end
+
+    if conf.model.name then
+        request_table.model = conf.model.name
+    end
+
+    local ai_driver = require("apisix.plugins.ai-proxy.drivers." .. 
conf.model.provider)
+    local ok, err = ai_driver.configure_request(conf, request_table, ctx)
+    if not ok then
+        core.log.error("failed to configure request for AI service: ", err)
+        return 500
+    end
+
+    if route_type ~= "passthrough" then
+        local final_body, err = core.json.encode(request_table)
+        if not final_body then
+            core.log.error("failed to encode request body to JSON: ", err)
+            return 500
+        end
+        ngx_req.set_body_data(final_body)
+    end
+end
+
+return _M

Review Comment:
   I didn't notice any use of sub-requests. How will we post-process the AI API response in the future?



##########
apisix/plugins/ai-proxy/schema.lua:
##########
@@ -0,0 +1,167 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local _M = {}
+
+local auth_schema = {
+    type = "object",
+    properties = {
+        source = {
+            type = "string",
+            enum = {"header", "param"}
+        },
+        name = {
+            type = "string",
+            description = "Name of the param/header carrying Authorization or 
API key.",
+            minLength = 1,
+        },
+        value = {
+            type = "string",
+            description = "Full auth-header/param value.",
+            minLength = 1,
+             -- TODO encrypted = true,
+        },
+    },
+    required = { "source", "name", "value" },
+    additionalProperties = false,
+}
+
+local model_options_schema = {

Review Comment:
   Is it compatible with other potential AI Providers?



##########
apisix/plugins/ai-proxy/schema.lua:
##########
@@ -0,0 +1,167 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local _M = {}
+
+local auth_schema = {
+    type = "object",
+    properties = {
+        source = {
+            type = "string",
+            enum = {"header", "param"}
+        },
+        name = {
+            type = "string",
+            description = "Name of the param/header carrying Authorization or 
API key.",
+            minLength = 1,
+        },
+        value = {
+            type = "string",
+            description = "Full auth-header/param value.",
+            minLength = 1,
+             -- TODO encrypted = true,
+        },
+    },
+    required = { "source", "name", "value" },
+    additionalProperties = false,
+}
+
+local model_options_schema = {
+    description = "Key/value settings for the model",
+    type = "object",
+    properties = {
+        max_tokens = {
+            type = "integer",
+            description = "Defines the max_tokens, if using chat or completion 
models.",
+            default = 256
+
+        },
+        input_cost = {
+            type = "number",
+            description = "Defines the cost per 1M tokens in your prompt.",
+            minimum = 0
+
+        },
+        output_cost = {
+            type = "number",
+            description = "Defines the cost per 1M tokens in the output of the 
AI.",
+            minimum = 0
+
+        },
+        temperature = {
+            type = "number",
+            description = "Defines the matching temperature, if using chat or 
completion models.",
+            minimum = 0.0,
+            maximum = 5.0,
+
+        },
+        top_p = {
+            type = "number",
+            description = "Defines the top-p probability mass, if supported.",
+            minimum = 0,
+            maximum = 1,
+
+        },
+        upstream_host = {
+            type = "string",
+            description = "To be specified to override the host of the AI 
provider",
+        },
+        upstream_port = {
+            type = "integer",
+            description = "To be specified to override the AI provider port",
+
+        },
+        upstream_path = {
+            type = "string",
+            description = "To be specified to override the URL to the AI 
provider endpoints",
+        },
+        response_streaming = {
+            description = "Stream response by SSE",
+            type = "boolean",
+            default = false,
+        }
+    }
+}
+
+local model_schema = {
+    type = "object",
+    properties = {
+        provider = {
+            type = "string",
+            description = "AI provider request format - kapisix translates "

Review Comment:
   What is `kapisix`?



##########
apisix/plugins/ai-proxy/drivers/openai.lua:
##########
@@ -0,0 +1,67 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local _M = {}
+
+local core = require("apisix.core")
+local test_scheme = os.getenv("AI_PROXY_TEST_SCHEME")
+local ngx = ngx
+local pairs = pairs
+
+-- globals
+local DEFAULT_HOST = "api.openai.com"
+local DEFAULT_PORT = 443
+
+local path_mapper = {
+    ["llm/completions"] = "/v1/completions",

Review Comment:
   Why do we need to support the v1 API that is marked as `Legacy`?



##########
apisix/plugins/ai-proxy/drivers/openai.lua:
##########
@@ -0,0 +1,67 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local _M = {}
+
+local core = require("apisix.core")
+local test_scheme = os.getenv("AI_PROXY_TEST_SCHEME")
+local ngx = ngx
+local pairs = pairs
+
+-- globals
+local DEFAULT_HOST = "api.openai.com"
+local DEFAULT_PORT = 443
+
+local path_mapper = {
+    ["llm/completions"] = "/v1/completions",
+    ["llm/chat"] = "/v1/chat/completions",
+}
+
+
+function _M.configure_request(conf, request_table, ctx)
+    local ip, err = core.resolver.parse_domain(conf.model.options.upstream_host or DEFAULT_HOST)
+    if not ip then
+        core.log.error("failed to resolve ai_proxy upstream host: ", err)
+        return core.response.exit(500)
+    end
+    ctx.custom_upstream_ip = ip
+    ctx.custom_upstream_port = conf.model.options.upstream_port or DEFAULT_PORT
+
+    local ups_path = (conf.model.options and conf.model.options.upstream_path)
+                        or path_mapper[conf.route_type]
+    ngx.var.upstream_uri = ups_path
+    ngx.var.upstream_scheme = test_scheme or "https"
+    ngx.req.set_method(ngx.HTTP_POST)
+    ngx.var.upstream_host = conf.model.options.upstream_host or DEFAULT_HOST
+    ctx.custom_balancer_host = conf.model.options.upstream_host or DEFAULT_HOST
+    ctx.custom_balancer_port = conf.model.options.port or DEFAULT_PORT

Review Comment:
   This should be implemented using the `upstream.set` API, just like the traffic-split plugin.



##########
apisix/plugins/ai-proxy/schema.lua:
##########
@@ -0,0 +1,167 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local _M = {}
+
+local auth_schema = {
+    type = "object",
+    properties = {
+        source = {
+            type = "string",
+            enum = {"header", "param"}
+        },
+        name = {
+            type = "string",
+            description = "Name of the param/header carrying Authorization or 
API key.",
+            minLength = 1,
+        },
+        value = {
+            type = "string",
+            description = "Full auth-header/param value.",
+            minLength = 1,
+             -- TODO encrypted = true,
+        },
+    },
+    required = { "source", "name", "value" },
+    additionalProperties = false,
+}
+
+local model_options_schema = {
+    description = "Key/value settings for the model",
+    type = "object",
+    properties = {
+        max_tokens = {
+            type = "integer",
+            description = "Defines the max_tokens, if using chat or completion 
models.",
+            default = 256
+
+        },
+        input_cost = {
+            type = "number",
+            description = "Defines the cost per 1M tokens in your prompt.",
+            minimum = 0
+
+        },
+        output_cost = {
+            type = "number",
+            description = "Defines the cost per 1M tokens in the output of the 
AI.",
+            minimum = 0
+
+        },
+        temperature = {
+            type = "number",
+            description = "Defines the matching temperature, if using chat or 
completion models.",
+            minimum = 0.0,
+            maximum = 5.0,
+
+        },
+        top_p = {
+            type = "number",
+            description = "Defines the top-p probability mass, if supported.",
+            minimum = 0,
+            maximum = 1,
+
+        },
+        upstream_host = {
+            type = "string",
+            description = "To be specified to override the host of the AI 
provider",
+        },
+        upstream_port = {
+            type = "integer",
+            description = "To be specified to override the AI provider port",
+
+        },
+        upstream_path = {
+            type = "string",
+            description = "To be specified to override the URL to the AI 
provider endpoints",
+        },
+        response_streaming = {
+            description = "Stream response by SSE",
+            type = "boolean",
+            default = false,
+        }

Review Comment:
   We'd better move configuration that concerns the HTTP request itself, such as the upstream and response settings, up to a higher level of the schema.
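
   One possible shape (purely illustrative, for discussion), pulling the transport-level fields out of `model.options`:

   ```lua
   -- hypothetical layout; auth_schema stays as-is and model_schema keeps
   -- only the model parameters (name, temperature, top_p, ...)
   _M.plugin_schema = {
       type = "object",
       properties = {
           route_type = { type = "string", enum = { "llm/chat", "passthrough" } },
           auth = auth_schema,
           model = model_schema,
           -- HTTP/transport concerns grouped at the top level
           override = {
               type = "object",
               properties = {
                   upstream_host = { type = "string" },
                   upstream_port = { type = "integer" },
                   upstream_path = { type = "string" },
               },
           },
           response_streaming = { type = "boolean", default = false },
       },
       required = { "route_type", "model", "auth" },
   }
   ```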



##########
apisix/plugins/ai-proxy.lua:
##########
@@ -0,0 +1,107 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local core = require("apisix.core")
+local schema = require("apisix.plugins.ai-proxy.schema")
+local constants = require("apisix.constants")
+local require = require
+
+local ngx_req = ngx.req
+local ngx = ngx
+
+local plugin_name = "ai-proxy"
+local _M = {
+    version = 0.5,
+    priority = 1004,
+    name = plugin_name,
+    schema = schema,
+}
+
+
+function _M.check_schema(conf)
+    local ai_driver = pcall(require, "apisix.plugins.ai-proxy.drivers." .. conf.model.provider)
+    if not ai_driver then
+        return false, "provider: " .. conf.model.provider .. " is not 
supported."
+    end
+    return core.schema.check(schema.plugin_schema, conf)
+end
+
+
+local CONTENT_TYPE_JSON = "application/json"
+
+
+local function get_request_table()
+    local req_body, err = core.request.get_body()
+    if not req_body then
+        return nil, "failed to get request body: " .. (err or "request body is 
empty")
+    end
+    req_body, err = req_body:gsub("\\\"", "\"") -- remove escaping in JSON

Review Comment:
   Please provide a concrete example of the scenario; otherwise, why would we apply such a hack to a string that is already valid JSON?



##########
apisix/plugins/ai-proxy/schema.lua:
##########
@@ -0,0 +1,167 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local _M = {}
+
+local auth_schema = {
+    type = "object",
+    properties = {
+        source = {
+            type = "string",
+            enum = {"header", "param"}
+        },
+        name = {
+            type = "string",
+            description = "Name of the param/header carrying Authorization or 
API key.",
+            minLength = 1,
+        },
+        value = {
+            type = "string",
+            description = "Full auth-header/param value.",
+            minLength = 1,
+             -- TODO encrypted = true,
+        },
+    },
+    required = { "source", "name", "value" },
+    additionalProperties = false,
+}
+
+local model_options_schema = {
+    description = "Key/value settings for the model",
+    type = "object",
+    properties = {
+        max_tokens = {
+            type = "integer",
+            description = "Defines the max_tokens, if using chat or completion 
models.",
+            default = 256
+
+        },
+        input_cost = {
+            type = "number",
+            description = "Defines the cost per 1M tokens in your prompt.",
+            minimum = 0
+
+        },
+        output_cost = {
+            type = "number",
+            description = "Defines the cost per 1M tokens in the output of the 
AI.",
+            minimum = 0
+
+        },
+        temperature = {
+            type = "number",
+            description = "Defines the matching temperature, if using chat or 
completion models.",
+            minimum = 0.0,
+            maximum = 5.0,
+
+        },
+        top_p = {
+            type = "number",
+            description = "Defines the top-p probability mass, if supported.",
+            minimum = 0,
+            maximum = 1,
+
+        },
+        upstream_host = {
+            type = "string",
+            description = "To be specified to override the host of the AI 
provider",
+        },
+        upstream_port = {
+            type = "integer",
+            description = "To be specified to override the AI provider port",
+
+        },
+        upstream_path = {
+            type = "string",
+            description = "To be specified to override the URL to the AI 
provider endpoints",
+        },
+        response_streaming = {
+            description = "Stream response by SSE",
+            type = "boolean",
+            default = false,
+        }
+    }
+}
+
+local model_schema = {
+    type = "object",
+    properties = {
+        provider = {
+            type = "string",
+            description = "AI provider request format - kapisix translates "
+                .. "requests to and from the specified backend compatible 
formats.",
+            oneOf = { "openai" }, -- add more providers later
+
+        },
+        name = {
+            type = "string",
+            description = "Model name to execute.",
+        },
+        options = model_options_schema,
+    },
+    required = {"provider", "name"}
+}
+
+_M.plugin_schema = {
+    type = "object",
+    properties = {
+        route_type = {
+            type = "string",
+            enum = { "llm/chat", "llm/completions", "passthrough" }
+        },
+        auth = auth_schema,
+        model = model_schema,
+    },
+    required = {"route_type", "model", "auth"}
+}
+
+_M.chat_request_schema = {
+    type = "object",
+    properties = {
+        messages = {
+            type = "array",
+            minItems = 1,
+            items = {
+                properties = {
+                    role = {
+                        type = "string",
+                        enum = {"system", "user", "assistant"}
+                    },
+                    content = {
+                        type = "string",
+                        minLength = "1",
+                    },
+                },
+                additionalProperties = false,
+                required = {"role", "content"},
+            },
+        }

Review Comment:
   What design specification does this schema follow? OpenAI's?
   Does this mean that every new provider in the future will use this format?
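
   For context, the quoted schema matches the shape of OpenAI's chat completion request body; an illustrative body it would accept:

   ```lua
   -- hypothetical request body (the model name is just an example)
   local request_table = {
       model = "gpt-4o",
       messages = {
           { role = "system", content = "You are a helpful assistant." },
           { role = "user", content = "Hello!" },
       },
   }
   ```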



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscr...@apisix.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

