This is an automated email from the ASF dual-hosted git repository.
wenming pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/apisix.git
The following commit(s) were added to refs/heads/master by this push:
new 98deae78c feat: add lago plugin (#12196)
98deae78c is described below
commit 98deae78c5df5c049f62800ce52d80d35e09aa8a
Author: Zeping Bai <[email protected]>
AuthorDate: Wed May 21 15:16:57 2025 +0800
feat: add lago plugin (#12196)
---
apisix/cli/config.lua | 1 +
apisix/plugins/lago.lua | 229 +++++++++++++++++++++++++++
conf/config.yaml.example | 1 +
docs/en/latest/config.json | 3 +-
docs/en/latest/plugins/lago.md | 255 +++++++++++++++++++++++++++++
t/admin/plugins.t | 1 +
t/plugin/lago.spec.mts | 352 +++++++++++++++++++++++++++++++++++++++++
t/plugin/lago.t | 77 +++++++++
8 files changed, 918 insertions(+), 1 deletion(-)
diff --git a/apisix/cli/config.lua b/apisix/cli/config.lua
index 835a0b02c..10f5969e6 100644
--- a/apisix/cli/config.lua
+++ b/apisix/cli/config.lua
@@ -250,6 +250,7 @@ local _M = {
"public-api",
"prometheus",
"datadog",
+ "lago",
"loki-logger",
"elasticsearch-logger",
"echo",
diff --git a/apisix/plugins/lago.lua b/apisix/plugins/lago.lua
new file mode 100644
index 000000000..3c5b1f166
--- /dev/null
+++ b/apisix/plugins/lago.lua
@@ -0,0 +1,229 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local type = type
+local pairs = pairs
+local math_random = math.random
+local ngx = ngx
+
+local http = require("resty.http")
+local bp_manager_mod = require("apisix.utils.batch-processor-manager")
+local core = require("apisix.core")
+local str_format = core.string.format
+
+local plugin_name = "lago"
+local batch_processor_manager = bp_manager_mod.new("lago logger")
+
+local schema = {
+ type = "object",
+ properties = {
+ -- core configurations
+ endpoint_addrs = {
+ type = "array",
+ minItems = 1,
+ items = core.schema.uri_def,
+ description = "Lago API address, like http://127.0.0.1:3000, "
+ .. "it supports both self-hosted and cloud. If
multiple endpoints are"
+ .. " configured, the log will be pushed to a randomly
determined"
+ .. " endpoint from the list.",
+ },
+ endpoint_uri = {
+ type = "string",
+ minLength = 1,
+ default = "/api/v1/events/batch",
+ description = "Lago API endpoint, it needs to be set to the batch
send endpoint.",
+ },
+ token = {
+ type = "string",
+ description = "Lago API key, create one for your organization on
dashboard."
+ },
+ event_transaction_id = {
+ type = "string",
+ description = "Event's transaction ID, it is used to identify and
de-duplicate"
+ .. " the event, it supports string templates
containing APISIX and"
+ .. " NGINX variables, like \"req_${request_id}\",
which allows you"
+ .. " to use values returned by upstream services or
request-id"
+ .. " plugin integration",
+ },
+ event_subscription_id = {
+ type = "string",
+ description = "Event's subscription ID, which is automatically
generated or"
+ .. " specified by you when you assign the plan to the
customer on"
+ .. " Lago, used to associate API consumption to a
customer subscription,"
+ .. " it supports string templates containing APISIX
and NGINX variables,"
+ .. " like \"cus_${consumer_name}\", which allows you
to use values"
+ .. " returned by upstream services or APISIX consumer",
+ },
+ event_code = {
+ type = "string",
+ description = "Lago billable metric's code for associating an
event to a specified"
+ .. "billable item",
+ },
+ event_properties = {
+ type = "object",
+ patternProperties = {
+ [".*"] = {
+ type = "string",
+ minLength = 1,
+ },
+ },
+ description = "Event's properties, used to attach information to
an event, this"
+ .. " allows you to send certain information on a event
to Lago, such"
+ .. " as sending HTTP status to take a failed request
off the bill, or"
+ .. " sending the AI token consumption in the response
body for accurate"
+ .. " billing, its keys are fixed strings and its
values can be string"
+ .. " templates containing APISIX and NGINX variables,
like \"${status}\""
+ },
+
+ -- connection layer configurations
+ ssl_verify = {type = "boolean", default = true},
+ timeout = {
+ type = "integer",
+ minimum = 1,
+ maximum = 60000,
+ default = 3000,
+ description = "timeout in milliseconds",
+ },
+ keepalive = {type = "boolean", default = true},
+ keepalive_timeout = {
+ type = "integer",
+ minimum = 1000,
+ default = 60000,
+ description = "keepalive timeout in milliseconds",
+ },
+ keepalive_pool = {type = "integer", minimum = 1, default = 5},
+ },
+ required = {"endpoint_addrs", "token", "event_transaction_id",
"event_subscription_id",
+ "event_code"},
+ encrypt_fields = {"token"},
+}
+schema = batch_processor_manager:wrap_schema(schema)
+
+-- According to https://getlago.com/docs/api-reference/events/batch, the
maximum batch size is 100,
+-- so we have to override the default batch size to make it work out of the
box,the plugin does
+-- not set a maximum limit, so if Lago relaxes the limit, then user can modify
it
+-- to a larger batch size
+-- This does not affect other plugins, schema is appended after deep copy
+schema.properties.batch_max_size.default = 100
+
+
+local _M = {
+ version = 0.1,
+ priority = 415,
+ name = plugin_name,
+ schema = schema,
+}
+
+
+function _M.check_schema(conf, schema_type)
+ local check = {"endpoint_addrs"}
+ core.utils.check_https(check, conf, plugin_name)
+ core.utils.check_tls_bool({"ssl_verify"}, conf, plugin_name)
+
+ return core.schema.check(schema, conf)
+end
+
+
+local function send_http_data(conf, data)
+ local body, err = core.json.encode(data)
+ if not body then
+ return false, str_format("failed to encode json: %s", err)
+ end
+ local params = {
+ headers = {
+ ["Content-Type"] = "application/json",
+ ["Authorization"] = "Bearer " .. conf.token,
+ },
+ keepalive = conf.keepalive,
+ ssl_verify = conf.ssl_verify,
+ method = "POST",
+ body = body,
+ }
+
+ if conf.keepalive then
+ params.keepalive_timeout = conf.keepalive_timeout
+ params.keepalive_pool = conf.keepalive_pool
+ end
+
+ local httpc, err = http.new()
+ if not httpc then
+ return false, str_format("create http client error: %s", err)
+ end
+ httpc:set_timeout(conf.timeout)
+
+ -- select an random endpoint and build URL
+ local endpoint_url =
conf.endpoint_addrs[math_random(#conf.endpoint_addrs)]..conf.endpoint_uri
+ local res, err = httpc:request_uri(endpoint_url, params)
+ if not res then
+ return false, err
+ end
+
+ if res.status >= 300 then
+ return false, str_format("lago api returned status: %d, body: %s",
+ res.status, res.body or "")
+ end
+
+ return true
+end
+
+
+function _M.log(conf, ctx)
+ -- build usage event
+ local event_transaction_id, err =
core.utils.resolve_var(conf.event_transaction_id, ctx.var)
+ if err then
+ core.log.error("failed to resolve event_transaction_id, event dropped:
", err)
+ return
+ end
+
+ local event_subscription_id, err =
core.utils.resolve_var(conf.event_subscription_id, ctx.var)
+ if err then
+ core.log.error("failed to resolve event_subscription_id, event
dropped: ", err)
+ return
+ end
+
+ local entry = {
+ transaction_id = event_transaction_id,
+ external_subscription_id = event_subscription_id,
+ code = conf.event_code,
+ timestamp = ngx.req.start_time(),
+ }
+
+ if conf.event_properties and type(conf.event_properties) == "table" then
+ entry.properties = core.table.deepcopy(conf.event_properties)
+ for key, value in pairs(entry.properties) do
+ local new_val, err, n_resolved = core.utils.resolve_var(value,
ctx.var)
+ if not err and n_resolved > 0 then
+ entry.properties[key] = new_val
+ end
+ end
+ end
+
+ if batch_processor_manager:add_entry(conf, entry) then
+ return
+ end
+
+ -- generate a function to be executed by the batch processor
+ local func = function(entries)
+ return send_http_data(conf, {
+ events = entries,
+ })
+ end
+
+ batch_processor_manager:add_entry_to_new_processor(conf, entry, ctx, func)
+end
+
+
+return _M
diff --git a/conf/config.yaml.example b/conf/config.yaml.example
index 0b980a3b4..7e66f2b9c 100644
--- a/conf/config.yaml.example
+++ b/conf/config.yaml.example
@@ -514,6 +514,7 @@ plugins: # plugin list (sorted by
priority)
- public-api # priority: 501
- prometheus # priority: 500
- datadog # priority: 495
+ - lago # priority: 415
- loki-logger # priority: 414
- elasticsearch-logger # priority: 413
- echo # priority: 412
diff --git a/docs/en/latest/config.json b/docs/en/latest/config.json
index 8c708f7ef..9531d908e 100644
--- a/docs/en/latest/config.json
+++ b/docs/en/latest/config.json
@@ -212,7 +212,8 @@
"plugins/loggly",
"plugins/elasticsearch-logger",
"plugins/tencent-cloud-cls",
- "plugins/loki-logger"
+ "plugins/loki-logger",
+ "plugins/lago"
]
}
]
diff --git a/docs/en/latest/plugins/lago.md b/docs/en/latest/plugins/lago.md
new file mode 100644
index 000000000..88ef8bf9a
--- /dev/null
+++ b/docs/en/latest/plugins/lago.md
@@ -0,0 +1,255 @@
+---
+title: lago
+keywords:
+ - Apache APISIX
+ - API Gateway
+ - Plugin
+ - lago
+ - monetization
+ - github.com/getlago/lago
+description: The lago plugin reports usage to a Lago instance, which allows
users to integrate Lago with APISIX for API monetization.
+---
+
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+-->
+
+## Description
+
+The `lago` plugin pushes requests and responses to [Lago
Self-hosted](https://github.com/getlago/lago) and [Lago
Cloud](https://getlago.com) via the Lago REST API. The plugin allows you to use
it with a variety of APISIX built-in features, such as the APISIX consumer and
the request-id plugin.
+
+This allows for API monetization or let APISIX to be an AI gateway for AI
tokens billing scenarios.
+
+:::note disclaimer
+
+Lago owns its trademarks and controls its commercial products and open source
projects.
+
+The [https://github.com/getlago/lago](https://github.com/getlago/lago) project
uses the `AGPL-3.0` license instead of the `Apache-2.0` license that is the
same as Apache APISIX. As a user, you will need to evaluate for yourself
whether it is applicable to your business to use the project in a compliant way
or to obtain another type of license from Lago. Apache APISIX community does
not endorse it.
+
+The plugin does not contain any proprietary code or SDKs from Lago, it is
contributed by contributors to Apache APISIX and licensed under the
`Apache-2.0` license, which is in line with any other part of APISIX and you
don't need to worry about its compliance.
+
+:::
+
+When enabled, the plugin will collect information from the request context
(e.g. event code, transaction ID, associated subscription ID) as configured and
serialize them into [Event JSON
objects](https://getlago.com/docs/api-reference/events/event-object) as
required by Lago. They will be added to the buffer and sent to Lago in batches
of up to 100. This batch size is a
[requirement](https://getlago.com/docs/api-reference/events/batch) from Lago.
If you want to modify it, see [batch proc [...]
+
+## Attributes
+
+| Name | Type | Required | Default | Valid values | Description |
+|---|---|---|---|---|---|
+| endpoint_addrs | array[string] | True | | | Lago API address, such as
`http://127.0.0.1:3000`. It supports both self-hosted Lago and Lago Cloud. If
multiple endpoints are configured, the log will be pushed to a randomly
selected endpoint from the list. |
+| endpoint_uri | string | False | /api/v1/events/batch | | Lago API endpoint
for [batch usage events](https://docs.getlago.com/api-reference/events/batch). |
+| token | string | True | | | Lago API key created in the Lago dashboard. |
+| event_transaction_id | string | True | | | Event's transaction ID, used to
identify and de-duplicate the event. It supports string templates containing
APISIX and NGINX variables, such as `req_${request_id}`, which allows you to
use values returned by upstream services or the `request-id` plugin. |
+| event_subscription_id | string | True | | | Event's subscription ID, which
is automatically generated or configured when you assign the plan to the
customer on Lago. This is used to associate API consumption to a customer
subscription and supports string templates containing APISIX and NGINX
variables, such as `cus_${consumer_name}`, which allows you to use values
returned by upstream services or APISIX consumer. |
+| event_code | string | True | | | Lago billable metric's code for
associating an event to a specified billable item. |
+| event_properties | object | False | | | Event's properties, used to attach
information to an event. This allows you to send certain information on an
event to Lago, such as the HTTP status to exclude failed requests from billing,
or the AI token consumption in the response body for accurate billing. The keys
are fixed strings, while the values can be string templates containing APISIX
and NGINX variables, such as `${status}`. |
+| ssl_verify | boolean | False | true | | If true, verify
Lago's SSL certificates. |
+| timeout | integer | False | 3000 | [1, 60000] | Timeout
for the Lago service HTTP call in milliseconds. |
+| keepalive | boolean | False | true | | If true, keep the
connection alive for multiple requests. |
+| keepalive_timeout | integer | False | 60000 | >=1000 | Keepalive
timeout in milliseconds. |
+| keepalive_pool | integer | False | 5 | >=1 | Maximum
number of connections in the connection pool. |
+
+This Plugin supports using batch processors to aggregate and process events in
a batch. This avoids the need for frequently submitting the data. The batch
processor submits data every `5` seconds or when the data in the queue reaches
the batch size (`100` by default for this plugin, per Lago's batch API limit).
See [Batch Processor](../batch-processor.md#configuration) for more
information or setting your custom configuration.
+
+## Examples
+
+The examples below demonstrate how you can configure the `lago` Plugin for a
typical scenario.
+
+To follow along the examples, start a Lago instance. Refer to
[https://github.com/getlago/lago](https://github.com/getlago/lago) or use Lago
Cloud.
+
+Follow these brief steps to configure Lago:
+
+1. Get the Lago API Key (also known as `token`), from the __Developer__ page
of the Lago dashboard.
+2. Next, create a billable metric used by APISIX, assuming its code is `test`.
Set the `Aggregation type` to `Count`; and add a filter with a key of `tier`
whose value contains `expensive` to allow us to distinguish between API values,
which will be demonstrated later.
+3. Create a plan and add the created metric to it. Its code can be configured
however you like. In the __Usage-based charges__ section, add the billable
metric created previously as a `Metered charge` item. Specify the default price
as `$1`. Add a filter, use `tier: expensive` to perform the filtering, and
specify its price as `$10`.
+4. Select an existing consumer or create a new one to assign the plan you just
created. You need to specify a `Subscription external ID` (or you can have Lago
generate it), which will be used as the APISIX consumer username.
+
+Next we need to configure APISIX for demonstrations.
+
+:::note
+
+You can fetch the `admin_key` from `config.yaml` and save to an environment
variable with the following command:
+
+```bash
+admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed
's/"//g')
+```
+
+:::
+
+### Report API call usage
+
+The following example demonstrates how you can configure the `lago` Plugin on
a Route to measure API call usage.
+
+Create a Route with the `lago`, `request-id`, `key-auth` Plugins as such:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \
+ -H "X-API-KEY: ${admin_key}" \
+ -d '{
+ "id": "lago-route-1",
+ "uri": "/get",
+ "plugins": {
+ "request-id": {
+ "include_in_response": true
+ },
+ "key-auth": {},
+ "lago": {
+ "endpoint_addrs": ["http://127.0.0.1:3000"],
+ "token": "<Get token from Lago dashboard>",
+ "event_transaction_id": "${http_x_request_id}",
+ "event_subscription_id": "${http_x_consumer_username}",
+ "event_code": "test"
+ }
+ },
+ "upstream": {
+ "nodes": {
+ "httpbin.org:80": 1
+ },
+ "type": "roundrobin"
+ }
+ }'
+```
+
+Create a second route with the `lago`, `request-id`, `key-auth` Plugin as such:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \
+ -H "X-API-KEY: ${admin_key}" \
+ -d '{
+ "id": "lago-route-2",
+ "uri": "/anything",
+ "plugins": {
+ "request-id": {
+ "include_in_response": true
+ },
+ "key-auth": {},
+ "lago": {
+ "endpoint_addrs": ["http://127.0.0.1:3000"],
+ "token": "<Get token from Lago dashboard>",
+ "event_transaction_id": "${http_x_request_id}",
+ "event_subscription_id": "${http_x_consumer_username}",
+ "event_code": "test",
+ "event_properties": {
+ "tier": "expensive"
+ }
+ }
+ },
+ "upstream": {
+ "nodes": {
+ "httpbin.org:80": 1
+ },
+ "type": "roundrobin"
+ }
+ }'
+```
+
+Create a Consumer:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \
+ -H "X-API-KEY: ${admin_key}" \
+ -d '{
+ "username": "<Lago subscription external ID>",
+ "plugins": {
+ "key-auth": {
+ "key": "demo"
+ }
+ }
+ }'
+```
+
+Send three requests to the two routes respectively:
+
+```shell
+curl "http://127.0.0.1:9080/get"
+curl "http://127.0.0.1:9080/get"
+curl "http://127.0.0.1:9080/get"
+curl "http://127.0.0.1:9080/anything"
+curl "http://127.0.0.1:9080/anything"
+curl "http://127.0.0.1:9080/anything"
+```
+
+You should receive `HTTP/1.1 200 OK` responses for all requests.
+
+Wait a few seconds, then navigate to the __Developer__ page in the Lago
dashboard. Under __Events__, you should see 6 event entries sent by APISIX.
+
+If the self-hosted instance's event worker is configured correctly (or if
you're using Lago Cloud), you can also see the total amount consumed in real
time in the consumer's subscription usage, which should be `3 * $1 + 3 * $10 =
$33` according to our demo use case.
+
+## FAQ
+
+### Purpose of the Plugin
+
+When you make an effort to monetize your API, it's hard to find a ready-made,
low-cost solution, so you may have to build your own billing stack, which is
complicated.
+
+This plugin allows you to use APISIX to handle API proxies and use Lago as a
billing stack through direct integration with Lago, and both the APISIX open
source project and Lago will be part of your portfolio, which is a huge time
saver.
+
+Every API call results in a Lago event, which allows you to bill users for
real usage, i.e. pay-as-you-go, and thanks to our built-in transaction ID
(request ID) support, you can simply implement API call logging and
troubleshooting for your customers.
+
+In addition to typical API monetization scenarios, APISIX can also do AI
tokens-based billing when it is acting as an AI gateway, where each Lago event
generated by an API request includes exactly how many tokens were consumed, to
allow you to charge the user for a fine-grained per-tokens usage.
+
+### Is it flexible?
+
+Of course, the fact that we make transaction ID, subscription ID as a
configuration item and allow you to use APISIX and NGINX variables in it means
that it's simple to integrate the plugin with any existing or your own
authentication and internal services.
+
+- Use custom authentication: as long as the Lago subscription ID represented
by the user ID is registered as an APISIX variable, it will be available from
there, so custom authentication is completely possible!
+- Integration with internal services: You might not need the APISIX built-in
request-id plugin. That's OK. You can have your internal service (APISIX
upstream) generate it and include it in the HTTP response header. Then you can
access it via an NGINX variable in the transaction ID.
+
+Event properties are supported, allowing you to set special values for
specific APIs. For example, if your service has 100 APIs, you can enable
general billing for all of them while customizing a few with different
pricing—just as demonstrated above.
+
+### Which Lago versions does it work with?
+
+When we first developed the Lago plugin, the latest Lago release was `1.17.0`,
which we used for integration, so it works at least with `1.17.0`.
+
+Technically, we use the Lago batch event API to submit events in batches, and
APISIX will only use this API, so as long as Lago doesn't make any disruptive
changes to this API, APISIX will be able to integrate with it.
+
+Here's an [archive
page](https://web.archive.org/web/20250516073803/https://getlago.com/docs/api-reference/events/batch)
of the API documentation, which allows you to check the differences between
the API at the time of our integration and the latest API.
+
+If the latest API changes, you can submit an issue to inform the APISIX
maintainers that this may require some changes.
+
+### Why can't Lago receive events?
+
+Check `error.log` for a log entry such as the following:
+
+```text
+2023/04/30 13:45:46 [error] 19381#19381: *1075673 [lua]
batch-processor.lua:95: Batch Processor[lago logger] failed to process entries:
lago api returned status: 400, body: <error message>, context: ngx.timer,
client: 127.0.0.1, server: 0.0.0.0:9080
+```
+
+The error can be diagnosed based on the error code in the `failed to process
entries: lago api returned status: 400, body: <error message>` and the response
body of the lago server.
+
+### Reliability of reporting
+
+The plugin may encounter a network problem that prevents the node where the
gateway is located from communicating with the Lago API, in which case APISIX
will discard the batch according to the [batch
processor](../batch-processor.md) configuration: the batch will be discarded if
the specified number of retries are made and the data still cannot be sent.
+
+Discarded events are permanently lost, so it is recommended that you use this
plugin in conjunction with other logging mechanisms and perform event replay
after Lago is unavailable causing data to be discarded to ensure that all logs
are correctly sent to Lago.
+
+### Will the event duplicate?
+
+While APISIX performs retries based on the [batch
processor](../batch-processor.md) configuration, you don't need to worry about
duplicate events being reported to Lago.
+
+The `event_transaction_id` and `timestamp` are generated and logged after the
request is processed on the APISIX side, and Lago de-duplicates the event based
on them.
+So even if a retry is triggered because the network causes Lago to send a
`success` response that is not received by APISIX, the event is still not
duplicated on Lago.
+
+### Performance Impacts
+
+The plugin is logically simple and reliable; it simply builds a Lago event
object for each request, buffers and sends them in bulk. The logic is not
coupled to the request proxy path, so this does not cause latency to rise for
requests going through the gateway.
+
+Technically, the logic is executed in the NGINX log phase and [batch
processor](../batch-processor.md) timer, so this does not affect the request
itself.
+
+### Resource overhead
+
+As explained earlier in the performance impact section, the plugin doesn't
cause a significant increase in system resources. It only uses a small amount
of memory to store events for batching.
diff --git a/t/admin/plugins.t b/t/admin/plugins.t
index bbacad6ab..d1683b46b 100644
--- a/t/admin/plugins.t
+++ b/t/admin/plugins.t
@@ -124,6 +124,7 @@ http-dubbo
public-api
prometheus
datadog
+lago
loki-logger
elasticsearch-logger
echo
diff --git a/t/plugin/lago.spec.mts b/t/plugin/lago.spec.mts
new file mode 100644
index 000000000..36833cdb8
--- /dev/null
+++ b/t/plugin/lago.spec.mts
@@ -0,0 +1,352 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import { generateKeyPair } from 'node:crypto';
+import { existsSync } from 'node:fs';
+import { readFile, rm, writeFile } from 'node:fs/promises';
+import { promisify } from 'node:util';
+
+import { afterAll, beforeAll, describe, expect, it } from '@jest/globals';
+import axios from 'axios';
+import * as compose from 'docker-compose';
+import { gql, request } from 'graphql-request';
+import { Api as LagoApi, Client as LagoClient } from 'lago-javascript-client';
+import simpleGit from 'simple-git';
+import * as YAML from 'yaml';
+
+import { request as requestAdminAPI } from '../ts/admin_api';
+import { wait } from '../ts/utils';
+
+const LAGO_VERSION = 'v1.27.0';
+const LAGO_PATH = '/tmp/lago';
+const LAGO_FRONT_PORT = 59999;
+const LAGO_API_PORT = 30699;
+const LAGO_API_URL = `http://127.0.0.1:${LAGO_API_PORT}`;
+const LAGO_API_BASEURL = `http://127.0.0.1:${LAGO_API_PORT}/api/v1`;
+const LAGO_API_GRAPHQL_ENDPOINT = `${LAGO_API_URL}/graphql`;
+const LAGO_BILLABLE_METRIC_CODE = 'test';
+const LAGO_EXTERNAL_SUBSCRIPTION_ID = 'jack_test';
+
+// The project uses AGPLv3, so we can't store the docker compose file it uses
in our repository and download it during testing.
+const downloadComposeFile = async () =>
+ simpleGit().clone('https://github.com/getlago/lago', LAGO_PATH, {
+ '--depth': '1',
+ '--branch': LAGO_VERSION,
+ });
+
+const launchLago = async () => {
+ // patch docker-compose.yml to disable useless port
+ const composeFilePath = `${LAGO_PATH}/docker-compose.yml`;
+ const composeFile = YAML.parse(await readFile(composeFilePath, 'utf8'));
+ delete composeFile.services.front; // front-end is not needed for tests
+ delete composeFile.services['api-clock']; // clock is not needed for tests
+ delete composeFile.services['api-worker']; // worker is not needed for tests
+ delete composeFile.services['pdf']; // pdf is not needed for tests
+ delete composeFile.services.redis.ports; // prevent port conflict
+ delete composeFile.services.db.ports; // prevent port conflict
+ await writeFile(composeFilePath, YAML.stringify(composeFile), 'utf8');
+
+ // launch services
+ const { privateKey } = await promisify(generateKeyPair)('rsa', {
+ modulusLength: 2048,
+ publicKeyEncoding: { type: 'pkcs1', format: 'pem' },
+ privateKeyEncoding: { type: 'pkcs1', format: 'pem' },
+ });
+ const composeOpts: compose.IDockerComposeOptions = {
+ cwd: LAGO_PATH,
+ log: true,
+ env: {
+ LAGO_RSA_PRIVATE_KEY: Buffer.from(privateKey).toString('base64'),
+ FRONT_PORT: `${LAGO_FRONT_PORT}`, // avoiding conflicts, tests do not
require a front-end
+ API_PORT: `${LAGO_API_PORT}`,
+ LAGO_FRONT_URL: `http://127.0.0.1:${LAGO_FRONT_PORT}`,
+ LAGO_API_URL,
+ },
+ };
+
+ await compose.createAll(composeOpts);
+ await compose.upOne('api', composeOpts);
+ await compose.exec('api', 'rails db:create', composeOpts);
+ await compose.exec('api', 'rails db:migrate', composeOpts);
+ await compose.upAll(composeOpts);
+};
+
+const provisionLago = async () => {
+ // sign up
+ const { registerUser } = await request<{
+ registerUser: { token: string; user: { organizations: { id: string } } };
+ }>(
+ LAGO_API_GRAPHQL_ENDPOINT,
+ gql`
+ mutation signup($input: RegisterUserInput!) {
+ registerUser(input: $input) {
+ token
+ user {
+ id
+ organizations {
+ id
+ }
+ }
+ }
+ }
+ `,
+ {
+ input: {
+ email: '[email protected]',
+ password: 'Admin000!',
+ organizationName: 'test',
+ },
+ },
+ );
+
+ const webToken = registerUser.token;
+ const organizationId = registerUser.user.organizations[0].id;
+ const requestHeaders = {
+ Authorization: `Bearer ${webToken}`,
+ 'X-Lago-Organization': organizationId,
+ };
+
+ // list api keys
+ const { apiKeys } = await request<{
+ apiKeys: { collection: { id: string }[] };
+ }>(
+ LAGO_API_GRAPHQL_ENDPOINT,
+ gql`
+ query getApiKeys {
+ apiKeys(page: 1, limit: 20) {
+ collection {
+ id
+ }
+ }
+ }
+ `,
+ {},
+ requestHeaders,
+ );
+
+ // get first api key
+ const { apiKey } = await request<{ apiKey: { value: string } }>(
+ LAGO_API_GRAPHQL_ENDPOINT,
+ gql`
+ query getApiKeyValue($id: ID!) {
+ apiKey(id: $id) {
+ id
+ value
+ }
+ }
+ `,
+ { id: apiKeys.collection[0].id },
+ requestHeaders,
+ );
+
+ const lagoClient = LagoClient(apiKey.value, { baseUrl: LAGO_API_BASEURL });
+
+ // create billable metric
+ const { data: billableMetric } =
+ await lagoClient.billableMetrics.createBillableMetric({
+ billable_metric: {
+ name: LAGO_BILLABLE_METRIC_CODE,
+ code: LAGO_BILLABLE_METRIC_CODE,
+ aggregation_type: 'count_agg',
+ filters: [
+ {
+ key: 'tier',
+ values: ['normal', 'expensive'],
+ },
+ ],
+ },
+ });
+
+ // create plan
+ const { data: plan } = await lagoClient.plans.createPlan({
+ plan: {
+ name: 'test',
+ code: 'test',
+ interval: 'monthly',
+ amount_cents: 0,
+ amount_currency: 'USD',
+ pay_in_advance: false,
+ charges: [
+ {
+ billable_metric_id: billableMetric.billable_metric.lago_id,
+ charge_model: 'standard',
+ pay_in_advance: false,
+ properties: { amount: '1' },
+ filters: [
+ {
+ properties: { amount: '10' },
+ values: { tier: ['expensive'] },
+ },
+ ],
+ },
+ ],
+ },
+ });
+
+ // create customer
+ const external_customer_id = 'jack';
+ const { data: customer } = await lagoClient.customers.createCustomer({
+ customer: {
+ external_id: external_customer_id,
+ name: 'Jack',
+ currency: 'USD',
+ },
+ });
+
+ // assign plan to customer
+ await lagoClient.subscriptions.createSubscription({
+ subscription: {
+ external_customer_id: customer.customer.external_id,
+ plan_code: plan.plan.code,
+ external_id: LAGO_EXTERNAL_SUBSCRIPTION_ID,
+ },
+ });
+
+ return { apiKey: apiKey.value, client: lagoClient };
+};
+
+describe('Plugin - Lago', () => {
+ const JACK_USERNAME = 'jack_test';
+ const client = axios.create({ baseURL: 'http://127.0.0.1:1984' });
+
+ let restAPIKey: string;
+ let lagoClient: LagoApi<unknown>; // prettier-ignore
+
+ // set up
+ beforeAll(async () => {
+ if (existsSync(LAGO_PATH)) await rm(LAGO_PATH, { recursive: true });
+ await downloadComposeFile();
+ await launchLago();
+ let res = await provisionLago();
+ restAPIKey = res.apiKey;
+ lagoClient = res.client;
+ }, 120 * 1000);
+
+ // clean up
+ afterAll(async () => {
+ await compose.downAll({
+ cwd: LAGO_PATH,
+ commandOptions: ['--volumes'],
+ });
+ await rm(LAGO_PATH, { recursive: true });
+ }, 30 * 1000);
+
+ it('should create route', async () => {
+ await expect(
+ requestAdminAPI('/apisix/admin/routes/1', 'PUT', {
+ uri: '/hello',
+ upstream: {
+ nodes: {
+ '127.0.0.1:1980': 1,
+ },
+ type: 'roundrobin',
+ },
+ plugins: {
+ 'request-id': { include_in_response: true }, // for transaction_id
+ 'key-auth': {}, // for subscription_id
+ lago: {
+ endpoint_addrs: [LAGO_API_URL],
+ token: restAPIKey,
+ event_transaction_id: '${http_x_request_id}',
+ event_subscription_id: '${http_x_consumer_username}',
+ event_code: 'test',
+ batch_max_size: 1, // does not buffered usage reports
+ },
+ },
+ }),
+ ).resolves.not.toThrow();
+
+ await expect(
+ requestAdminAPI('/apisix/admin/routes/2', 'PUT', {
+ uri: '/hello1',
+ upstream: {
+ nodes: {
+ '127.0.0.1:1980': 1,
+ },
+ type: 'roundrobin',
+ },
+ plugins: {
+ 'request-id': { include_in_response: true },
+ 'key-auth': {},
+ lago: {
+ endpoint_addrs: [LAGO_API_URL],
+ token: restAPIKey,
+ event_transaction_id: '${http_x_request_id}',
+ event_subscription_id: '${http_x_consumer_username}',
+ event_code: 'test',
+ event_properties: { tier: 'expensive' },
+ batch_max_size: 1,
+ },
+ },
+ }),
+ ).resolves.not.toThrow();
+ });
+
+ it('should create consumer', async () =>
+ expect(
+ requestAdminAPI(`/apisix/admin/consumers/${JACK_USERNAME}`, 'PUT', {
+ username: JACK_USERNAME,
+ plugins: {
+ 'key-auth': { key: JACK_USERNAME },
+ },
+ }),
+ ).resolves.not.toThrow());
+
+ it('call API (without key)', async () => {
+ const res = await client.get('/hello', { validateStatus: () => true });
+ expect(res.status).toEqual(401);
+ });
+
+ it('call normal API', async () => {
+ for (let i = 0; i < 3; i++) {
+ await expect(
+ client.get('/hello', { headers: { apikey: JACK_USERNAME } }),
+ ).resolves.not.toThrow();
+ }
+ await wait(500);
+ });
+
+ it('check Lago events (normal API)', async () => {
+ const { data } = await lagoClient.events.findAllEvents({
+ external_subscription_id: LAGO_EXTERNAL_SUBSCRIPTION_ID,
+ });
+
+ expect(data.events).toHaveLength(3);
+ expect(data.events[0].code).toEqual(LAGO_BILLABLE_METRIC_CODE);
+ });
+
+ let expensiveStartAt: Date;
+ it('call expensive API', async () => {
+ expensiveStartAt = new Date();
+ for (let i = 0; i < 3; i++) {
+ await expect(
+ client.get('/hello1', { headers: { apikey: JACK_USERNAME } }),
+ ).resolves.not.toThrow();
+ }
+ await wait(500);
+ });
+
+ it('check Lago events (expensive API)', async () => {
+ const { data } = await lagoClient.events.findAllEvents({
+ external_subscription_id: LAGO_EXTERNAL_SUBSCRIPTION_ID,
+ timestamp_from: expensiveStartAt.toISOString(),
+ });
+
+ expect(data.events).toHaveLength(3);
+ expect(data.events[0].code).toEqual(LAGO_BILLABLE_METRIC_CODE);
+ expect(data.events[1].properties).toEqual({ tier: 'expensive' });
+ });
+});
diff --git a/t/plugin/lago.t b/t/plugin/lago.t
new file mode 100644
index 000000000..7e1c64093
--- /dev/null
+++ b/t/plugin/lago.t
@@ -0,0 +1,77 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+use t::APISIX 'no_plan';
+
+repeat_each(1);
+no_long_string();
+no_root_location();
+
+add_block_preprocessor(sub {
+ my ($block) = @_;
+
+ if (!defined $block->request) {
+ $block->set_value("request", "GET /t");
+ }
+});
+
+run_tests();
+
+__DATA__
+
+=== TEST 1: sanity
+--- config
+ location /t {
+ content_by_lua_block {
+ local test_cases = {
+                {endpoint_addrs = {"http://127.0.0.1:3000"}, token = "token", event_transaction_id = "tid", event_subscription_id = "sid", event_code = "code"},
+                {endpoint_addrs = "http://127.0.0.1:3000", token = "token", event_transaction_id = "tid", event_subscription_id = "sid", event_code = "code"},
+                {endpoint_addrs = {}, token = "token", event_transaction_id = "tid", event_subscription_id = "sid", event_code = "code"},
+                {endpoint_addrs = {"http://127.0.0.1:3000"}, endpoint_uri = "/test", token = "token", event_transaction_id = "tid", event_subscription_id = "sid", event_code = "code"},
+                {endpoint_addrs = {"http://127.0.0.1:3000"}, endpoint_uri = 1234, token = "token", event_transaction_id = "tid", event_subscription_id = "sid", event_code = "code"},
+                {endpoint_addrs = {"http://127.0.0.1:3000"}, token = 1234, event_transaction_id = "tid", event_subscription_id = "sid", event_code = "code"},
+                {endpoint_addrs = {"http://127.0.0.1:3000"}, token = "token", event_transaction_id = "tid", event_subscription_id = "sid", event_code = "code", event_properties = {key = "value"}},
+                {endpoint_addrs = {"http://127.0.0.1:3000"}, token = "token", event_transaction_id = "tid", event_subscription_id = "sid", event_code = "code", event_properties = {1,2,3}},
+ }
+ local plugin = require("apisix.plugins.lago")
+
+ for _, case in ipairs(test_cases) do
+ local ok, err = plugin.check_schema(case)
+ ngx.say(ok and "done" or err)
+ end
+ }
+ }
+--- response_body
+done
+property "endpoint_addrs" validation failed: wrong type: expected array, got string
+property "endpoint_addrs" validation failed: expect array to have at least 1 items
+done
+property "endpoint_uri" validation failed: wrong type: expected string, got number
+property "token" validation failed: wrong type: expected string, got number
+done
+property "event_properties" validation failed: wrong type: expected object, got table
+
+
+
+=== TEST 2: test
+--- timeout: 300
+--- max_size: 2048000
+--- exec
+cd t && pnpm test plugin/lago.spec.mts 2>&1
+--- no_error_log
+failed to execute the script with status
+--- response_body eval
+qr/PASS plugin\/lago.spec.mts/