This is an automated email from the ASF dual-hosted git repository.

pingtimeout pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/polaris-tools.git


The following commit(s) were added to refs/heads/main by this push:
     new 8543c60  chore(feat): add benchmarks/makefile with usability targets
8543c60 is described below

commit 8543c60a81dcca3b7a19275cfe8456cca9c7bebf
Author: Artur Rakhmatulin <[email protected]>
AuthorDate: Wed Dec 10 15:48:11 2025 +0000

    chore(feat): add benchmarks/makefile with usability targets
    
    The added Makefile includes:
    
    * targets to list and delete reports
    * targets to run individual benchmarks
---
 benchmarks/Makefile  | 31 +++++++++++++++++++++++++++++++
 benchmarks/README.md | 26 ++++++++++++++++++++++++++
 2 files changed, 57 insertions(+)
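
As a quick usage sketch (assuming the commands are run from the benchmarks/ directory, where the Makefile lives), the targets added by this commit would be invoked like this:

```bash
# Run the new weighted-workload benchmark
make weighted-workload-simulation

# List the generated Gatling reports, then remove them all
make reports-list
make reports-clean
```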

diff --git a/benchmarks/Makefile b/benchmarks/Makefile
index a78a10b..ee9f0d2 100644
--- a/benchmarks/Makefile
+++ b/benchmarks/Makefile
@@ -36,3 +36,34 @@ read-update-simulation:
 create-commits-simulation:
        ./gradlew gatlingRun --simulation org.apache.polaris.benchmarks.simulations.CreateCommits \
        -Dconfig.file=./application.conf
+
+.PHONY: weighted-workload-simulation
+weighted-workload-simulation:
+       ./gradlew gatlingRun --simulation org.apache.polaris.benchmarks.simulations.WeightedWorkloadOnTreeDataset \
+       -Dconfig.file=./application.conf
+
+.PHONY: reports-list
+reports-list:
+       @for report in $$(ls -d build/reports/gatling/*/ 2>/dev/null | sort -r); do \
+               basename="$$(basename $$report)"; \
+               name="$$(echo $$basename | sed 's/-[0-9]\{17\}$$//')"; \
+               timestamp="$$(echo $$basename | grep -o '[0-9]\{17\}$$')"; \
+               if [ -n "$$timestamp" ]; then \
+                       date="$$(echo $$timestamp | cut -c1-8)"; \
+                       time="$$(echo $$timestamp | cut -c9-14)"; \
+                       millis="$$(echo $$timestamp | cut -c15-17)"; \
+                       formatted_date="$$(echo $$date | sed 's/\([0-9]\{4\}\)\([0-9]\{2\}\)\([0-9]\{2\}\)/\1-\2-\3/')"; \
+                       formatted_time="$$(echo $$time | sed 's/\([0-9]\{2\}\)\([0-9]\{2\}\)\([0-9]\{2\}\)/\1:\2:\3/')"; \
+                       index_path="$${report}index.html"; \
+                       if [ -f "$$index_path" ]; then \
+                               echo "$$name | $$formatted_date $$formatted_time.$$millis | $$index_path"; \
+                       else \
+                               echo "$$name | $$formatted_date $$formatted_time.$$millis | ERROR"; \
+                       fi; \
+               fi; \
+       done
+
+.PHONY: reports-clean
+reports-clean:
+       @rm -rf build/reports/gatling/*/ 2>/dev/null || true
+
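
The `reports-list` recipe above relies on Gatling report directories ending in a 17-digit timestamp (assumed here to be YYYYMMDDHHMMSSmmm). A minimal standalone sketch of the same parsing, outside make, using the sample report name that appears in the README below:

```bash
#!/bin/sh
# Sample report directory name taken from the README example below.
basename="readtreedataset-20251204125433540"

# Split off the simulation name and the 17-digit timestamp suffix.
name="$(echo "$basename" | sed 's/-[0-9]\{17\}$//')"
timestamp="$(echo "$basename" | grep -o '[0-9]\{17\}$')"

# Slice the timestamp into date, time and milliseconds.
date="$(echo "$timestamp" | cut -c1-8)"      # 20251204
time="$(echo "$timestamp" | cut -c9-14)"     # 125433
millis="$(echo "$timestamp" | cut -c15-17)"  # 540

# Reformat with the same sed expressions used in reports-list.
formatted_date="$(echo "$date" | sed 's/\([0-9]\{4\}\)\([0-9]\{2\}\)\([0-9]\{2\}\)/\1-\2-\3/')"
formatted_time="$(echo "$time" | sed 's/\([0-9]\{2\}\)\([0-9]\{2\}\)\([0-9]\{2\}\)/\1:\2:\3/')"

echo "$name | $formatted_date $formatted_time.$millis"
# Prints: readtreedataset | 2025-12-04 12:54:33.540
```
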
diff --git a/benchmarks/README.md b/benchmarks/README.md
index 60f2c5f..954f6f5 100644
--- a/benchmarks/README.md
+++ b/benchmarks/README.md
@@ -131,6 +131,9 @@ make read-simulation
 
 # Commits creation
 make create-commits-simulation
+
+# Weighted workload
+make weighted-workload-simulation
 ```
 
 A message will show the location of the Gatling report:
@@ -155,6 +158,29 @@ For repeated testing and benchmarking purposes it's convenient to have fixed cli
 
 With the above you can run the benchmarks using a configuration file with `client-id = "admin"` and `client-secret = "admin"` - meant only for convenience in a fully airgapped system.
 
+### Benchmark results
+
+1. **List all reports**
+```bash
+make reports-list
+```
+
+Example output:
+
+```
+readtreedataset | 2025-12-04 12:54:33.540 | build/reports/gatling/readtreedataset-20251204125433540/index.html
+```
+
+2. **Open the report in a browser**
+```bash
+open build/reports/gatling/readtreedataset-20251204125433540/index.html
+```
+
+3. **Clean all reports**
+```bash
+make reports-clean
+```
+
 # Test Dataset
 
 The benchmarks use synthetic procedural datasets that are generated deterministically at runtime. This means that given the same input parameters, the exact same dataset structure will always be generated. This approach allows generating large volumes of test data without having to store it, while ensuring reproducible benchmark results across different runs.
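
Tying the README steps together, a hedged one-liner for opening the most recent report without copying its path by hand (the `open` command used in the README is macOS-specific; on Linux, `xdg-open` is the usual equivalent):

```bash
# `ls -td` lists the report directories newest-first; take the first one.
open "$(ls -td build/reports/gatling/*/ | head -1)index.html"
```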
