This is an automated email from the ASF dual-hosted git repository.

hanahmily pushed a commit to branch perf/trace-vs-stream
in repository https://gitbox.apache.org/repos/asf/skywalking-banyandb.git

commit f02b5b3026056c71916aca384084cbe06f478a33
Author: Gao Hongtao <[email protected]>
AuthorDate: Sat Sep 27 10:01:59 2025 +0000

    refactor: improve performance test setup and schema management
    
    - Enhanced the performance test suite by refining schema loading and 
verification logic for Stream and Trace models.
    - Updated the test suite to ensure accurate performance comparisons and 
streamlined execution of benchmarks.
    - Made adjustments to the schema definitions to improve clarity and 
usability in performance testing.
---
 test/stress/stream-vs-trace/docker/.gitignore      |  14 +
 test/stress/stream-vs-trace/docker/Dockerfile      |  50 +++
 test/stress/stream-vs-trace/docker/Makefile        |  67 ++++
 test/stress/stream-vs-trace/docker/README.md       | 263 +++++++++++++++
 .../stream-vs-trace/docker/client_wrappers.go      |  52 +++
 .../stream-vs-trace/docker/docker-compose.yml      |  96 ++++++
 test/stress/stream-vs-trace/docker/docker_test.go  | 148 ++++++++
 .../stream-vs-trace/docker/run-docker-test.sh      | 221 ++++++++++++
 .../stress/stream-vs-trace/docker/schema_client.go | 372 +++++++++++++++++++++
 .../stream-vs-trace/docker/wait-for-healthy.sh     |  41 +++
 10 files changed, 1324 insertions(+)

diff --git a/test/stress/stream-vs-trace/docker/.gitignore 
b/test/stress/stream-vs-trace/docker/.gitignore
new file mode 100644
index 00000000..fadb27cc
--- /dev/null
+++ b/test/stress/stream-vs-trace/docker/.gitignore
@@ -0,0 +1,14 @@
+# Docker volumes
+stream-data/
+trace-data/
+
+# Test output
+*.out
+*.log
+
+# Coverage files
+*.cov
+coverage.html
+
+# Binary files
+*.test
\ No newline at end of file
diff --git a/test/stress/stream-vs-trace/docker/Dockerfile 
b/test/stress/stream-vs-trace/docker/Dockerfile
new file mode 100644
index 00000000..5e1b4d1a
--- /dev/null
+++ b/test/stress/stream-vs-trace/docker/Dockerfile
@@ -0,0 +1,50 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Runtime stage
+FROM alpine:latest
+
+# Install runtime dependencies
+RUN apk add --no-cache ca-certificates wget
+
+# Create non-root user
+RUN adduser -D -g '' banyandb
+
+# Create data directory
+RUN mkdir -p /data && chown banyandb:banyandb /data
+
+# Copy banyand binary from builder
+# The release build creates binaries in build/bin/<OS>/<ARCH>/ format
+ARG TARGETARCH=amd64
+COPY banyand/build/bin/linux/${TARGETARCH}/banyand-server-static 
/usr/local/bin/banyand
+
+# Copy schema files for the test
+COPY --chown=banyandb:banyandb test/stress/stream-vs-trace/testdata/schema 
/schema
+
+# Switch to non-root user
+USER banyandb
+
+# Set data volume
+VOLUME ["/data"]
+
+# Expose BanyanDB ports
+EXPOSE 17912 17913 6060 2121
+
+# Set entrypoint
+ENTRYPOINT ["/usr/local/bin/banyand"]
+
+# Default command for standalone mode
+CMD ["standalone", "--root-path=/data"]
\ No newline at end of file
diff --git a/test/stress/stream-vs-trace/docker/Makefile 
b/test/stress/stream-vs-trace/docker/Makefile
new file mode 100644
index 00000000..281747ae
--- /dev/null
+++ b/test/stress/stream-vs-trace/docker/Makefile
@@ -0,0 +1,67 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.PHONY: help build up down test clean logs ps stats all
+
+# Default target
+help:
+       @echo "Stream vs Trace Docker Test - Make Targets"
+       @echo "=========================================="
+       @echo "  all      - Run complete test (build, up, test, down)"
+       @echo "  build    - Build Docker images"
+       @echo "  up       - Start containers and wait for health"
+       @echo "  test     - Run tests (containers must be running)"
+       @echo "  down     - Stop and remove containers"
+       @echo "  clean    - Clean up everything including volumes"
+       @echo "  logs     - Show container logs"
+       @echo "  ps       - Show container status"
+       @echo "  stats    - Show container resource usage"
+
+# Run complete test workflow
+all:
+       ./run-docker-test.sh all
+
+# Build Docker images
+build:
+       ./run-docker-test.sh build
+
+# Start containers
+up:
+       ./run-docker-test.sh up
+
+# Stop containers
+down:
+       ./run-docker-test.sh down
+
+# Run the test (assumes containers are already running)
+test:
+       ./run-docker-test.sh test
+
+# Clean everything
+clean:
+       ./run-docker-test.sh clean
+
+# Show logs
+logs:
+       ./run-docker-test.sh logs
+
+# Show container status
+ps:
+       ./run-docker-test.sh ps
+
+# Show resource usage
+stats:
+       ./run-docker-test.sh stats
\ No newline at end of file
diff --git a/test/stress/stream-vs-trace/docker/README.md 
b/test/stress/stream-vs-trace/docker/README.md
new file mode 100644
index 00000000..3e528ecb
--- /dev/null
+++ b/test/stress/stream-vs-trace/docker/README.md
@@ -0,0 +1,263 @@
+# Stream vs Trace Performance Test - Docker Version
+
+This directory contains the Docker-based implementation of the Stream vs Trace 
performance test.
+
+## Overview
+
+The test runs two separate BanyanDB instances in Docker containers:
+- **banyandb-stream**: Dedicated instance for stream model testing
+- **banyandb-trace**: Dedicated instance for trace model testing
+
+The test clients run outside the containers on the host machine, connecting to 
the containerized BanyanDB instances via exposed ports.
+
+## Architecture
+
+```
+┌─────────────────────────────┐
+│   Host Machine              │
+│                             │
+│  ┌─────────────────────┐    │
+│  │  Test Client        │    │
+│  │  (Go Test Suite)    │    │
+│  └──────┬──────┬───────┘    │
+│         │      │            │
+│    :17912  :27912           │
+└─────────┼──────┼────────────┘
+          │      │
+     ┌────▼──┐ ┌─▼─────┐
+     │Stream │ │ Trace │
+     │BanyanDB│ │BanyanDB│
+     │Container│ │Container│
+     └────────┘ └────────┘
+```
+
+## Resource Limits
+
+Each container is configured with:
+- **CPU**: 2 cores (limit), 1 core (reservation)
+- **Memory**: 4GiB (limit), 2GiB (reservation)
+
+## Port Mapping
+
+### Stream Container (banyandb-stream)
+- gRPC: 17912
+- HTTP: 17913
+- pprof: 6060
+- metrics: 2121
+
+### Trace Container (banyandb-trace)
+- gRPC: 27912 (offset by 10000)
+- HTTP: 27913 (offset by 10000)
+- pprof: 16060 (offset by 10000)
+- metrics: 12121 (offset by 10000)
+
+## Prerequisites
+
+- Docker and Docker Compose
+- Go 1.23 or later
+- GNU Make
+
+## Usage
+
+### Quick Start
+
+```bash
+# Run complete test workflow (build, start, test, stop)
+make all
+
+# Or using the script directly
+./run-docker-test.sh all
+```
+
+### Available Commands
+
+Both `make` and `./run-docker-test.sh` support the same commands:
+
+| Command | Description |
+|---------|-------------|
+| `all` | Run complete test workflow (build → up → test → down) |
+| `build` | Build BanyanDB binaries and Docker images |
+| `up` | Start containers and wait for health |
+| `test` | Run performance tests (containers must be running) |
+| `down` | Stop and remove containers |
+| `clean` | Clean up everything including volumes |
+| `logs` | Show container logs (follow mode) |
+| `ps` | Show container status |
+| `stats` | Show container resource usage |
+| `help` | Show usage information |
+
+### Common Workflows
+
+#### Full Test Run
+```bash
+# Using make
+make all
+
+# Using script
+./run-docker-test.sh all
+```
+
+#### Development Workflow
+```bash
+# Build and start containers
+make build up
+
+# Run tests multiple times
+make test
+make test
+
+# View logs in another terminal
+make logs
+
+# Check resource usage
+make stats
+
+# Clean up when done
+make down
+```
+
+#### Debugging Workflow
+```bash
+# Start containers
+make up
+
+# Check status
+make ps
+
+# View logs
+make logs
+
+# Run specific test
+export DOCKER_TEST=true
+cd test/stress/stream-vs-trace/docker
+go test -v -run TestStreamVsTraceDocker/specific_test
+
+# Keep containers running for inspection
+# (Don't run make down)
+```
+
+### Script Usage
+
+The `run-docker-test.sh` script provides the same functionality:
+
+```bash
+# Show help
+./run-docker-test.sh help
+
+# Run individual commands
+./run-docker-test.sh build
+./run-docker-test.sh up
+./run-docker-test.sh test
+./run-docker-test.sh down
+
+# View logs
+./run-docker-test.sh logs
+
+# Check status
+./run-docker-test.sh ps
+./run-docker-test.sh stats
+```
+
+## Files
+
+- `Dockerfile`: Runtime image that packages the pre-built BanyanDB release 
binary (compiled on the host by `run-docker-test.sh build`) into a minimal image
+- `docker-compose.yml`: Container orchestration configuration for stream and 
trace instances
+- `Makefile`: Convenient interface that delegates all operations to 
run-docker-test.sh
+- `run-docker-test.sh`: Main script containing all build, run, and test logic
+- `wait-for-healthy.sh`: Helper script to wait for containers to be healthy
+- `docker_test.go`: Test suite for Docker-based testing
+- `schema_client.go`: gRPC client for loading schemas into BanyanDB
+- `client_wrappers.go`: Wrapper clients exposing gRPC connections
+- `README.md`: This documentation
+
+## Architecture Details
+
+### Build Process
+1. The build happens in two steps:
+   - **Host build**: `run-docker-test.sh build` compiles static BanyanDB 
release binaries on the host (`make -C banyand release`)
+   - **Runtime image**: The Dockerfile copies the pre-built binary into a 
minimal Alpine-based image
+
+2. Release binaries are built with:
+   - Static linking for portability
+   - Platform-specific paths (e.g., `linux/amd64`)
+   - Optimizations enabled
+
+### Test Architecture
+- All logic is centralized in `run-docker-test.sh`
+- The Makefile provides a familiar interface but delegates to the script
+- This ensures consistency and single source of truth
+
+## Differences from Non-Docker Version
+
+The Docker version differs from the standalone version in several ways:
+
+1. **Isolation**: Each BanyanDB instance runs in its own container with 
dedicated resources
+2. **Port Mapping**: Trace instance uses ports offset by 10000 to avoid 
conflicts
+3. **Binary Type**: Uses statically-linked release binaries for better 
container compatibility
+4. **Schema Loading**: Schemas are loaded via gRPC after containers start (not 
via CLI)
+5. **Resource Limits**: Enforced CPU and memory limits per container
+
+## Troubleshooting
+
+### Containers not starting
+```bash
+# Check container logs
+docker-compose logs banyandb-stream
+docker-compose logs banyandb-trace
+
+# Check container health
+docker inspect banyandb-stream | grep -A 5 "Health"
+docker inspect banyandb-trace | grep -A 5 "Health"
+```
+
+### Connection refused errors
+- Ensure Docker is running
+- Check if ports are already in use: `lsof -i :17912` or `lsof -i :27912`
+- Verify containers are healthy: `docker-compose ps`
+
+### Schema loading failures
+- Check if schema files exist in `../testdata/schema/`
+- Verify file permissions
+- Check container logs for specific errors
+
+## Performance Monitoring
+
+During the test, you can monitor container performance:
+
+```bash
+# Real-time stats
+docker stats banyandb-stream banyandb-trace
+
+# Access pprof (stream)
+go tool pprof http://localhost:6060/debug/pprof/profile
+
+# Access pprof (trace)
+go tool pprof http://localhost:16060/debug/pprof/profile
+
+# Metrics endpoints
+curl http://localhost:2121/metrics   # Stream
+curl http://localhost:12121/metrics  # Trace
+```
+
+## Expected Output
+
+A successful test run will show:
+1. Docker images being built with BanyanDB release binaries
+2. Both containers starting and becoming healthy
+3. Schema loading confirmation for both instances
+4. Performance test results comparing stream vs trace models
+5. Container resource usage statistics
+6. Containers being stopped and cleaned up
+
+Example output snippet:
+```
+✅ Stream vs Trace Performance Test - Complete Workflow
+Building Docker containers...
+Starting Docker containers...
+Waiting for containers to be healthy...
+Running performance tests...
+=== RUN   TestStreamVsTraceDocker
+...performance metrics...
+--- PASS: TestStreamVsTraceDocker
+Test completed successfully!
+```
\ No newline at end of file
diff --git a/test/stress/stream-vs-trace/docker/client_wrappers.go 
b/test/stress/stream-vs-trace/docker/client_wrappers.go
new file mode 100644
index 00000000..8e786c49
--- /dev/null
+++ b/test/stress/stream-vs-trace/docker/client_wrappers.go
@@ -0,0 +1,52 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package docker
+
+import (
+       "google.golang.org/grpc"
+       
+       streamvstrace 
"github.com/apache/skywalking-banyandb/test/stress/stream-vs-trace"
+)
+
+// DockerStreamClient wraps StreamClient with connection exposed
+type DockerStreamClient struct {
+       *streamvstrace.StreamClient
+       conn *grpc.ClientConn
+}
+
+// NewDockerStreamClient creates a new DockerStreamClient instance
+func NewDockerStreamClient(conn *grpc.ClientConn) *DockerStreamClient {
+       return &DockerStreamClient{
+               StreamClient: streamvstrace.NewStreamClient(conn),
+               conn:         conn,
+       }
+}
+
+// DockerTraceClient wraps TraceClient with connection exposed
+type DockerTraceClient struct {
+       *streamvstrace.TraceClient
+       conn *grpc.ClientConn
+}
+
+// NewDockerTraceClient creates a new DockerTraceClient instance
+func NewDockerTraceClient(conn *grpc.ClientConn) *DockerTraceClient {
+       return &DockerTraceClient{
+               TraceClient: streamvstrace.NewTraceClient(conn),
+               conn:        conn,
+       }
+}
\ No newline at end of file
diff --git a/test/stress/stream-vs-trace/docker/docker-compose.yml 
b/test/stress/stream-vs-trace/docker/docker-compose.yml
new file mode 100644
index 00000000..cd23f428
--- /dev/null
+++ b/test/stress/stream-vs-trace/docker/docker-compose.yml
@@ -0,0 +1,96 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+version: '3.8'
+
+services:
+  banyandb-stream:
+    build:
+      context: ../../../..
+      dockerfile: test/stress/stream-vs-trace/docker/Dockerfile
+    container_name: banyandb-stream
+    hostname: banyandb-stream
+    environment:
+      - GRPC_GO_LOG_SEVERITY_LEVEL=ERROR
+      - GRPC_GO_LOG_FORMATTER=json
+    volumes:
+      - stream-data:/data
+    ports:
+      - "17912:17912"  # gRPC port
+      - "17913:17913"  # HTTP port
+      - "6060:6060"    # pprof port
+      - "2121:2121"    # metrics port
+    networks:
+      - banyandb-network
+    deploy:
+      resources:
+        limits:
+          cpus: '2.0'
+          memory: 4G
+        reservations:
+          cpus: '1.0'
+          memory: 2G
+    command: ["standalone", "--stream-data-path=/data", "--logging-level=info"]
+    healthcheck:
+      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", 
"http://localhost:17913/api/healthz"]
+      interval: 10s
+      timeout: 5s
+      retries: 5
+      start_period: 30s
+
+  banyandb-trace:
+    build:
+      context: ../../../..
+      dockerfile: test/stress/stream-vs-trace/docker/Dockerfile
+    container_name: banyandb-trace
+    hostname: banyandb-trace
+    environment:
+      - GRPC_GO_LOG_SEVERITY_LEVEL=ERROR
+      - GRPC_GO_LOG_FORMATTER=json
+    volumes:
+      - trace-data:/data
+    ports:
+      - "27912:17912"  # gRPC port (offset by 10000)
+      - "27913:17913"  # HTTP port (offset by 10000)
+      - "16060:6060"   # pprof port (offset by 10000)
+      - "12121:2121"   # metrics port (offset by 10000)
+    networks:
+      - banyandb-network
+    deploy:
+      resources:
+        limits:
+          cpus: '2.0'
+          memory: 4G
+        reservations:
+          cpus: '1.0'
+          memory: 2G
+    command: ["standalone", "--trace-data-path=/data", "--logging-level=info"]
+    healthcheck:
+      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", 
"http://localhost:17913/api/healthz"]
+      interval: 10s
+      timeout: 5s
+      retries: 5
+      start_period: 30s
+
+volumes:
+  stream-data:
+    driver: local
+  trace-data:
+    driver: local
+
+networks:
+  banyandb-network:
+    driver: bridge
\ No newline at end of file
diff --git a/test/stress/stream-vs-trace/docker/docker_test.go 
b/test/stress/stream-vs-trace/docker/docker_test.go
new file mode 100644
index 00000000..f5a170ce
--- /dev/null
+++ b/test/stress/stream-vs-trace/docker/docker_test.go
@@ -0,0 +1,148 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Package docker provides performance comparison test between Stream and 
Trace models using Docker.
+package docker
+
+import (
+       "context"
+       "fmt"
+       "os"
+       "testing"
+       "time"
+
+       g "github.com/onsi/ginkgo/v2"
+       "github.com/onsi/gomega"
+       "google.golang.org/grpc"
+       "google.golang.org/grpc/credentials/insecure"
+
+       "github.com/apache/skywalking-banyandb/pkg/grpchelper"
+       "github.com/apache/skywalking-banyandb/pkg/logger"
+       "github.com/apache/skywalking-banyandb/pkg/test/flags"
+       "github.com/apache/skywalking-banyandb/pkg/test/helpers"
+       streamvstrace 
"github.com/apache/skywalking-banyandb/test/stress/stream-vs-trace"
+)
+
+func TestStreamVsTraceDocker(t *testing.T) {
+       if os.Getenv("DOCKER_TEST") != "true" {
+               t.Skip("Skipping Docker test. Set DOCKER_TEST=true to run.")
+       }
+       gomega.RegisterFailHandler(g.Fail)
+       g.RunSpecs(t, "Stream vs Trace Performance Docker Suite", 
g.Label("docker", "performance", "slow"))
+}
+
+var _ = g.Describe("Stream vs Trace Performance Docker", func() {
+       g.BeforeEach(func() {
+               gomega.Expect(logger.Init(logger.Logging{
+                       Env:   "dev",
+                       Level: flags.LogLevel,
+               })).To(gomega.Succeed())
+       })
+
+       g.It("should run performance comparison using Docker containers", 
func() {
+               // Define connection addresses for the two containers
+               streamAddr := "localhost:17912"  // Stream container gRPC port
+               traceAddr := "localhost:27912"   // Trace container gRPC port
+
+               // Wait for both containers to be ready
+               g.By("Waiting for Stream container to be ready")
+               gomega.Eventually(helpers.HealthCheck(streamAddr, 
10*time.Second, 10*time.Second, 
grpc.WithTransportCredentials(insecure.NewCredentials())),
+                       2*time.Minute).Should(gomega.Succeed())
+
+               g.By("Waiting for Trace container to be ready")
+               gomega.Eventually(helpers.HealthCheck(traceAddr, 
10*time.Second, 10*time.Second, 
grpc.WithTransportCredentials(insecure.NewCredentials())),
+                       2*time.Minute).Should(gomega.Succeed())
+
+               // Create gRPC connections
+               streamConn, err := grpchelper.Conn(streamAddr, 10*time.Second, 
grpc.WithTransportCredentials(insecure.NewCredentials()))
+               gomega.Expect(err).NotTo(gomega.HaveOccurred())
+               defer streamConn.Close()
+
+               traceConn, err := grpchelper.Conn(traceAddr, 10*time.Second, 
grpc.WithTransportCredentials(insecure.NewCredentials()))
+               gomega.Expect(err).NotTo(gomega.HaveOccurred())
+               defer traceConn.Close()
+
+               // Create Docker clients with exposed connections
+               streamClient := NewDockerStreamClient(streamConn)
+               traceClient := NewDockerTraceClient(traceConn)
+
+               // Create context for operations
+               ctx := context.Background()
+
+               // Load schemas for both containers
+               g.By("Loading schemas into containers")
+               err = loadSchemasToContainer(ctx, streamClient, traceClient)
+               gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+               // Verify schemas are loaded
+               g.By("Verifying schemas in Stream container")
+               streamGroupExists, err := streamClient.VerifyGroup(ctx, 
"stream_performance_test")
+               gomega.Expect(err).NotTo(gomega.HaveOccurred())
+               gomega.Expect(streamGroupExists).To(gomega.BeTrue())
+
+               streamExists, err := streamClient.VerifySchema(ctx, 
"stream_performance_test", "segment_stream")
+               gomega.Expect(err).NotTo(gomega.HaveOccurred())
+               gomega.Expect(streamExists).To(gomega.BeTrue())
+
+               g.By("Verifying schemas in Trace container")
+               traceGroupExists, err := traceClient.VerifyGroup(ctx, 
"trace_performance_test")
+               gomega.Expect(err).NotTo(gomega.HaveOccurred())
+               gomega.Expect(traceGroupExists).To(gomega.BeTrue())
+
+               traceExists, err := traceClient.VerifySchema(ctx, 
"trace_performance_test", "segment_trace")
+               gomega.Expect(err).NotTo(gomega.HaveOccurred())
+               gomega.Expect(traceExists).To(gomega.BeTrue())
+
+               fmt.Println("All schemas, groups, index rules, and index rule 
bindings verified successfully!")
+
+               // Run performance tests
+               g.By("Running Stream vs Trace performance comparison")
+
+               // Create benchmark runner with configuration
+               config := 
streamvstrace.DefaultBenchmarkConfig(streamvstrace.SmallScale)
+               config.TestDuration = 2 * time.Minute // Shorter duration for 
Docker testing
+               config.Concurrency = 5                // Lower concurrency for 
Docker
+
+               benchmarkRunner := streamvstrace.NewBenchmarkRunner(config, 
streamClient.StreamClient, traceClient.TraceClient)
+
+               // Run write benchmark
+               err = benchmarkRunner.RunWriteBenchmark(ctx)
+               gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+               // Compare results
+               benchmarkRunner.CompareResults()
+       })
+})
+
+// loadSchemasToContainer loads the required schemas into both containers
+func loadSchemasToContainer(ctx context.Context, streamClient 
*DockerStreamClient, traceClient *DockerTraceClient) error {
+       // Create schema clients for both connections
+       streamSchemaClient := NewSchemaClient(streamClient.conn)
+       traceSchemaClient := NewSchemaClient(traceClient.conn)
+
+       // Load stream-related schemas
+       if err := streamSchemaClient.LoadStreamSchemas(ctx); err != nil {
+               return fmt.Errorf("failed to load stream schemas: %w", err)
+       }
+
+       // Load trace-related schemas
+       if err := traceSchemaClient.LoadTraceSchemas(ctx); err != nil {
+               return fmt.Errorf("failed to load trace schemas: %w", err)
+       }
+
+       return nil
+}
\ No newline at end of file
diff --git a/test/stress/stream-vs-trace/docker/run-docker-test.sh 
b/test/stress/stream-vs-trace/docker/run-docker-test.sh
new file mode 100755
index 00000000..b4596668
--- /dev/null
+++ b/test/stress/stream-vs-trace/docker/run-docker-test.sh
@@ -0,0 +1,221 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+PROJECT_ROOT="$( cd "$SCRIPT_DIR/../../../.." && pwd )"
+
+# Colors for output
+GREEN='\033[0;32m'
+RED='\033[0;31m'
+YELLOW='\033[1;33m'
+NC='\033[0m' # No Color
+
+# Default command
+COMMAND=""
+
+# Function to check if Docker is running
+check_docker() {
+    if ! docker info > /dev/null 2>&1; then
+        echo -e "${RED}Docker is not running. Please start Docker and try 
again.${NC}"
+        exit 1
+    fi
+}
+
+# Function to show usage
+show_usage() {
+    echo "Usage: $0 [COMMAND] [OPTIONS]"
+    echo ""
+    echo "Commands:"
+    echo "  build     Build Docker images"
+    echo "  up        Start containers and wait for health"
+    echo "  test      Run performance tests (containers must be running)"
+    echo "  down      Stop and remove containers"
+    echo "  clean     Clean up everything including volumes"
+    echo "  logs      Show container logs"
+    echo "  ps        Show container status"
+    echo "  stats     Show container resource usage"
+    echo "  all       Run complete test (build, up, test, down)"
+    echo "  help      Show this help message"
+    echo ""
+    echo "Options:"
+    echo "  --help    Show this help message"
+    echo ""
+    echo "Examples:"
+    echo "  $0 all              # Run complete test workflow"
+    echo "  $0 build            # Just build images"
+    echo "  $0 up               # Start containers"
+    echo "  $0 test             # Run tests"
+    echo "  $0 down             # Stop containers"
+}
+
+# Function to build Docker images
+do_build() {
+    echo -e "${GREEN}Building Docker containers...${NC}"
+    cd "$PROJECT_ROOT"
+    make generate
+    PLATFORMS=linux/amd64 make -C banyand release
+    
+    cd "$SCRIPT_DIR"
+    docker compose build
+    echo -e "${GREEN}Build complete.${NC}"
+}
+
+# Function to start containers
+do_up() {
+    echo -e "${GREEN}Starting Docker containers...${NC}"
+    cd "$SCRIPT_DIR"
+    docker compose up -d
+    
+    echo -e "${GREEN}Waiting for containers to be healthy...${NC}"
+    ./wait-for-healthy.sh
+    echo -e "${GREEN}Containers are ready.${NC}"
+}
+
+# Function to run tests
+do_test() {
+    echo -e "${GREEN}Running performance tests...${NC}"
+    cd "$PROJECT_ROOT"
+    
+    # Set environment variable to enable Docker test
+    export DOCKER_TEST=true
+    
+    # Run the test from the docker directory to ensure relative paths work
+    cd "$SCRIPT_DIR"
+    go test -v -timeout 30m ./... -run TestStreamVsTraceDocker
+    
+    echo -e "${GREEN}Test completed successfully!${NC}"
+    
+    # Optional: Show container resource usage
+    echo -e "\n${YELLOW}Container Resource Usage:${NC}"
+    docker stats --no-stream banyandb-stream banyandb-trace
+}
+
+# Function to stop containers
+do_down() {
+    echo -e "${YELLOW}Stopping containers...${NC}"
+    cd "$SCRIPT_DIR"
+    docker compose down
+    echo -e "${GREEN}Containers stopped.${NC}"
+}
+
+# Function to clean everything
+do_clean() {
+    echo -e "${YELLOW}Cleaning up everything...${NC}"
+    cd "$SCRIPT_DIR"
+    docker compose down -v
+    docker compose rm -f
+    echo -e "${GREEN}Cleanup complete.${NC}"
+}
+
+# Function to show logs
+do_logs() {
+    cd "$SCRIPT_DIR"
+    docker compose logs -f
+}
+
+# Function to show container status
+do_ps() {
+    cd "$SCRIPT_DIR"
+    docker compose ps
+}
+
+# Function to show container stats
+do_stats() {
+    docker stats --no-stream banyandb-stream banyandb-trace
+}
+
+# Function to run all steps
+do_all() {
+    echo -e "${GREEN}Stream vs Trace Performance Test - Complete Workflow${NC}"
+    echo -e "${GREEN}====================================================${NC}"
+    
+    check_docker
+    do_build
+    do_up
+    do_test
+    do_down
+}
+
+# Parse command
+if [ $# -eq 0 ]; then
+    show_usage
+    exit 0
+fi
+
+COMMAND=$1
+shift
+
+# Parse remaining arguments
+while [[ $# -gt 0 ]]; do
+    case $1 in
+        --help|-h)
+            show_usage
+            exit 0
+            ;;
+        *)
+            echo -e "${RED}Unknown option: $1${NC}"
+            echo "Use --help for usage information"
+            exit 1
+            ;;
+    esac
+    shift
+done
+
+# Execute the command
+case $COMMAND in
+    build)
+        check_docker
+        do_build
+        ;;
+    up)
+        check_docker
+        do_up
+        ;;
+    test)
+        do_test
+        ;;
+    down)
+        do_down
+        ;;
+    clean)
+        do_clean
+        ;;
+    logs)
+        do_logs
+        ;;
+    ps)
+        do_ps
+        ;;
+    stats)
+        do_stats
+        ;;
+    all)
+        do_all
+        ;;
+    help)
+        show_usage
+        exit 0
+        ;;
+    *)
+        echo -e "${RED}Unknown command: $COMMAND${NC}"
+        show_usage
+        exit 1
+        ;;
+esac
\ No newline at end of file
diff --git a/test/stress/stream-vs-trace/docker/schema_client.go 
b/test/stress/stream-vs-trace/docker/schema_client.go
new file mode 100644
index 00000000..ad403d2c
--- /dev/null
+++ b/test/stress/stream-vs-trace/docker/schema_client.go
@@ -0,0 +1,372 @@
+// Licensed to Apache Software Foundation (ASF) under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Apache Software Foundation (ASF) licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package docker
+
+import (
+       "context"
+       "encoding/json"
+       "fmt"
+       "os"
+       "path/filepath"
+
+       "github.com/pkg/errors"
+       "google.golang.org/grpc"
+       "google.golang.org/grpc/codes"
+       "google.golang.org/grpc/status"
+       "google.golang.org/protobuf/encoding/protojson"
+
+       commonv1 
"github.com/apache/skywalking-banyandb/api/proto/banyandb/common/v1"
+       databasev1 
"github.com/apache/skywalking-banyandb/api/proto/banyandb/database/v1"
+       "github.com/apache/skywalking-banyandb/pkg/logger"
+)
+
// SchemaClient handles schema operations via gRPC.
// It bundles the BanyanDB registry clients needed to install the groups,
// stream/trace schemas, index rules, and index rule bindings used by the
// stream-vs-trace performance test.
type SchemaClient struct {
	groupClient        databasev1.GroupRegistryServiceClient            // creates groups
	streamClient       databasev1.StreamRegistryServiceClient           // creates stream schemas
	traceClient        databasev1.TraceRegistryServiceClient            // creates trace schemas
	indexRuleClient    databasev1.IndexRuleRegistryServiceClient        // creates index rules
	indexBindingClient databasev1.IndexRuleBindingRegistryServiceClient // binds rules to schemas
}
+
+// NewSchemaClient creates a new schema client
+func NewSchemaClient(conn *grpc.ClientConn) *SchemaClient {
+       return &SchemaClient{
+               groupClient:       
databasev1.NewGroupRegistryServiceClient(conn),
+               streamClient:      
databasev1.NewStreamRegistryServiceClient(conn),
+               traceClient:       
databasev1.NewTraceRegistryServiceClient(conn),
+               indexRuleClient:   
databasev1.NewIndexRuleRegistryServiceClient(conn),
+               indexBindingClient: 
databasev1.NewIndexRuleBindingRegistryServiceClient(conn),
+       }
+}
+
+// LoadStreamSchemas loads all stream-related schemas
+func (s *SchemaClient) LoadStreamSchemas(ctx context.Context) error {
+       // Load stream group
+       if err := s.loadStreamGroup(ctx); err != nil {
+               return fmt.Errorf("failed to load stream group: %w", err)
+       }
+
+       // Load stream schema
+       if err := s.loadStreamSchema(ctx); err != nil {
+               return fmt.Errorf("failed to load stream schema: %w", err)
+       }
+
+       // Load stream index rules
+       if err := s.loadStreamIndexRules(ctx); err != nil {
+               return fmt.Errorf("failed to load stream index rules: %w", err)
+       }
+
+       // Load stream index rule bindings
+       if err := s.loadStreamIndexRuleBindings(ctx); err != nil {
+               return fmt.Errorf("failed to load stream index rule bindings: 
%w", err)
+       }
+
+       return nil
+}
+
+// LoadTraceSchemas loads all trace-related schemas
+func (s *SchemaClient) LoadTraceSchemas(ctx context.Context) error {
+       // Load trace group
+       if err := s.loadTraceGroup(ctx); err != nil {
+               return fmt.Errorf("failed to load trace group: %w", err)
+       }
+
+       // Load trace schema
+       if err := s.loadTraceSchema(ctx); err != nil {
+               return fmt.Errorf("failed to load trace schema: %w", err)
+       }
+
+       // Load trace index rules
+       if err := s.loadTraceIndexRules(ctx); err != nil {
+               return fmt.Errorf("failed to load trace index rules: %w", err)
+       }
+
+       // Load trace index rule bindings
+       if err := s.loadTraceIndexRuleBindings(ctx); err != nil {
+               return fmt.Errorf("failed to load trace index rule bindings: 
%w", err)
+       }
+
+       return nil
+}
+
+func (s *SchemaClient) loadStreamGroup(ctx context.Context) error {
+       groupFile := filepath.Join("../testdata", "schema", "group.json")
+       data, err := os.ReadFile(groupFile)
+       if err != nil {
+               return errors.Wrapf(err, "failed to read group file: %s", 
groupFile)
+       }
+
+       var groupData []json.RawMessage
+       if err := json.Unmarshal(data, &groupData); err != nil {
+               return errors.Wrapf(err, "failed to unmarshal groups array")
+       }
+
+       // Load only stream group
+       for _, groupBytes := range groupData {
+               var group commonv1.Group
+               if err := protojson.Unmarshal(groupBytes, &group); err != nil {
+                       return errors.Wrapf(err, "failed to unmarshal group")
+               }
+
+               if group.Metadata.Name == "stream_performance_test" {
+                       _, err := s.groupClient.Create(ctx, 
&databasev1.GroupRegistryServiceCreateRequest{
+                               Group: &group,
+                       })
+                       if err != nil {
+                               if status.Code(err) == codes.AlreadyExists {
+                                       logger.Infof("Group %s already exists, 
skipping", group.Metadata.Name)
+                                       return nil
+                               }
+                               return errors.Wrapf(err, "failed to create 
group: %s", group.Metadata.Name)
+                       }
+                       logger.Infof("Created group: %s", group.Metadata.Name)
+                       return nil
+               }
+       }
+
+       return fmt.Errorf("stream group not found in group.json")
+}
+
+func (s *SchemaClient) loadTraceGroup(ctx context.Context) error {
+       groupFile := filepath.Join("../testdata", "schema", "group.json")
+       data, err := os.ReadFile(groupFile)
+       if err != nil {
+               return errors.Wrapf(err, "failed to read group file: %s", 
groupFile)
+       }
+
+       var groupData []json.RawMessage
+       if err := json.Unmarshal(data, &groupData); err != nil {
+               return errors.Wrapf(err, "failed to unmarshal groups array")
+       }
+
+       // Load only trace group
+       for _, groupBytes := range groupData {
+               var group commonv1.Group
+               if err := protojson.Unmarshal(groupBytes, &group); err != nil {
+                       return errors.Wrapf(err, "failed to unmarshal group")
+               }
+
+               if group.Metadata.Name == "trace_performance_test" {
+                       _, err := s.groupClient.Create(ctx, 
&databasev1.GroupRegistryServiceCreateRequest{
+                               Group: &group,
+                       })
+                       if err != nil {
+                               if status.Code(err) == codes.AlreadyExists {
+                                       logger.Infof("Group %s already exists, 
skipping", group.Metadata.Name)
+                                       return nil
+                               }
+                               return errors.Wrapf(err, "failed to create 
group: %s", group.Metadata.Name)
+                       }
+                       logger.Infof("Created group: %s", group.Metadata.Name)
+                       return nil
+               }
+       }
+
+       return fmt.Errorf("trace group not found in group.json")
+}
+
+func (s *SchemaClient) loadStreamSchema(ctx context.Context) error {
+       streamFile := filepath.Join("../testdata", "schema", 
"stream_schema.json")
+       data, err := os.ReadFile(streamFile)
+       if err != nil {
+               return errors.Wrapf(err, "failed to read stream schema file: 
%s", streamFile)
+       }
+
+       var stream databasev1.Stream
+       if err = protojson.Unmarshal(data, &stream); err != nil {
+               return errors.Wrapf(err, "failed to unmarshal stream schema")
+       }
+
+       _, err = s.streamClient.Create(ctx, 
&databasev1.StreamRegistryServiceCreateRequest{
+               Stream: &stream,
+       })
+       if err != nil {
+               if status.Code(err) == codes.AlreadyExists {
+                       logger.Infof("Stream %s already exists, skipping", 
stream.Metadata.Name)
+                       return nil
+               }
+               return errors.Wrapf(err, "failed to create stream: %s", 
stream.Metadata.Name)
+       }
+
+       logger.Infof("Created stream: %s", stream.Metadata.Name)
+       return nil
+}
+
+func (s *SchemaClient) loadTraceSchema(ctx context.Context) error {
+       traceFile := filepath.Join("../testdata", "schema", "trace_schema.json")
+       data, err := os.ReadFile(traceFile)
+       if err != nil {
+               return errors.Wrapf(err, "failed to read trace schema file: 
%s", traceFile)
+       }
+
+       var trace databasev1.Trace
+       if err = protojson.Unmarshal(data, &trace); err != nil {
+               return errors.Wrapf(err, "failed to unmarshal trace schema")
+       }
+
+       _, err = s.traceClient.Create(ctx, 
&databasev1.TraceRegistryServiceCreateRequest{
+               Trace: &trace,
+       })
+       if err != nil {
+               if status.Code(err) == codes.AlreadyExists {
+                       logger.Infof("Trace %s already exists, skipping", 
trace.Metadata.Name)
+                       return nil
+               }
+               return errors.Wrapf(err, "failed to create trace: %s", 
trace.Metadata.Name)
+       }
+
+       logger.Infof("Created trace: %s", trace.Metadata.Name)
+       return nil
+}
+
+func (s *SchemaClient) loadStreamIndexRules(ctx context.Context) error {
+       indexFile := filepath.Join("../testdata", "schema", 
"stream_index_rules.json")
+       data, err := os.ReadFile(indexFile)
+       if err != nil {
+               return errors.Wrapf(err, "failed to read stream index rules 
file: %s", indexFile)
+       }
+
+       var indexRuleData []json.RawMessage
+       if err := json.Unmarshal(data, &indexRuleData); err != nil {
+               return errors.Wrapf(err, "failed to unmarshal stream index 
rules array")
+       }
+
+       for i, ruleBytes := range indexRuleData {
+               var rule databasev1.IndexRule
+               if err := protojson.Unmarshal(ruleBytes, &rule); err != nil {
+                       return errors.Wrapf(err, "failed to unmarshal stream 
index rule %d", i)
+               }
+
+               _, err := s.indexRuleClient.Create(ctx, 
&databasev1.IndexRuleRegistryServiceCreateRequest{
+                       IndexRule: &rule,
+               })
+               if err != nil {
+                       if status.Code(err) == codes.AlreadyExists {
+                               logger.Infof("Stream index rule %s already 
exists, skipping", rule.Metadata.Name)
+                               continue
+                       }
+                       return errors.Wrapf(err, "failed to create stream index 
rule: %s", rule.Metadata.Name)
+               }
+               logger.Infof("Created stream index rule: %s", 
rule.Metadata.Name)
+       }
+
+       return nil
+}
+
+func (s *SchemaClient) loadTraceIndexRules(ctx context.Context) error {
+       indexFile := filepath.Join("../testdata", "schema", 
"trace_index_rules.json")
+       data, err := os.ReadFile(indexFile)
+       if err != nil {
+               return errors.Wrapf(err, "failed to read trace index rules 
file: %s", indexFile)
+       }
+
+       var indexRuleData []json.RawMessage
+       if err := json.Unmarshal(data, &indexRuleData); err != nil {
+               return errors.Wrapf(err, "failed to unmarshal trace index rules 
array")
+       }
+
+       for i, ruleBytes := range indexRuleData {
+               var rule databasev1.IndexRule
+               if err := protojson.Unmarshal(ruleBytes, &rule); err != nil {
+                       return errors.Wrapf(err, "failed to unmarshal trace 
index rule %d", i)
+               }
+
+               _, err := s.indexRuleClient.Create(ctx, 
&databasev1.IndexRuleRegistryServiceCreateRequest{
+                       IndexRule: &rule,
+               })
+               if err != nil {
+                       if status.Code(err) == codes.AlreadyExists {
+                               logger.Infof("Trace index rule %s already 
exists, skipping", rule.Metadata.Name)
+                               continue
+                       }
+                       return errors.Wrapf(err, "failed to create trace index 
rule: %s", rule.Metadata.Name)
+               }
+               logger.Infof("Created trace index rule: %s", rule.Metadata.Name)
+       }
+
+       return nil
+}
+
+func (s *SchemaClient) loadStreamIndexRuleBindings(ctx context.Context) error {
+       bindingFile := filepath.Join("../testdata", "schema", 
"stream_index_rule_bindings.json")
+       data, err := os.ReadFile(bindingFile)
+       if err != nil {
+               return errors.Wrapf(err, "failed to read stream index rule 
bindings file: %s", bindingFile)
+       }
+
+       var bindingData []json.RawMessage
+       if err := json.Unmarshal(data, &bindingData); err != nil {
+               return errors.Wrapf(err, "failed to unmarshal stream index rule 
bindings array")
+       }
+
+       for i, bindingBytes := range bindingData {
+               var binding databasev1.IndexRuleBinding
+               if err := protojson.Unmarshal(bindingBytes, &binding); err != 
nil {
+                       return errors.Wrapf(err, "failed to unmarshal stream 
index rule binding %d", i)
+               }
+
+               _, err := s.indexBindingClient.Create(ctx, 
&databasev1.IndexRuleBindingRegistryServiceCreateRequest{
+                       IndexRuleBinding: &binding,
+               })
+               if err != nil {
+                       if status.Code(err) == codes.AlreadyExists {
+                               logger.Infof("Stream index rule binding %s 
already exists, skipping", binding.Metadata.Name)
+                               continue
+                       }
+                       return errors.Wrapf(err, "failed to create stream index 
rule binding: %s", binding.Metadata.Name)
+               }
+               logger.Infof("Created stream index rule binding: %s", 
binding.Metadata.Name)
+       }
+
+       return nil
+}
+
+func (s *SchemaClient) loadTraceIndexRuleBindings(ctx context.Context) error {
+       bindingFile := filepath.Join("../testdata", "schema", 
"trace_index_rule_bindings.json")
+       data, err := os.ReadFile(bindingFile)
+       if err != nil {
+               return errors.Wrapf(err, "failed to read trace index rule 
bindings file: %s", bindingFile)
+       }
+
+       var bindingData []json.RawMessage
+       if err := json.Unmarshal(data, &bindingData); err != nil {
+               return errors.Wrapf(err, "failed to unmarshal trace index rule 
bindings array")
+       }
+
+       for i, bindingBytes := range bindingData {
+               var binding databasev1.IndexRuleBinding
+               if err := protojson.Unmarshal(bindingBytes, &binding); err != 
nil {
+                       return errors.Wrapf(err, "failed to unmarshal trace 
index rule binding %d", i)
+               }
+
+               _, err := s.indexBindingClient.Create(ctx, 
&databasev1.IndexRuleBindingRegistryServiceCreateRequest{
+                       IndexRuleBinding: &binding,
+               })
+               if err != nil {
+                       if status.Code(err) == codes.AlreadyExists {
+                               logger.Infof("Trace index rule binding %s 
already exists, skipping", binding.Metadata.Name)
+                               continue
+                       }
+                       return errors.Wrapf(err, "failed to create trace index 
rule binding: %s", binding.Metadata.Name)
+               }
+               logger.Infof("Created trace index rule binding: %s", 
binding.Metadata.Name)
+       }
+
+       return nil
+}
\ No newline at end of file
diff --git a/test/stress/stream-vs-trace/docker/wait-for-healthy.sh 
b/test/stress/stream-vs-trace/docker/wait-for-healthy.sh
new file mode 100755
index 00000000..117a46fe
--- /dev/null
+++ b/test/stress/stream-vs-trace/docker/wait-for-healthy.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
# Wait for containers to be healthy.
# Polls the docker health status of both benchmark containers once per
# second for up to $timeout seconds; exits 0 as soon as both report
# "healthy", otherwise prints the compose status and exits 1.
timeout=60
elapsed=0

echo -n "Waiting for containers to be healthy"

# health_of NAME — print the container's health status, or "not found"
# when the container does not exist (inspect errors are suppressed).
health_of() {
    docker inspect --format='{{.State.Health.Status}}' "$1" 2>/dev/null || echo "not found"
}

while [ "$elapsed" -lt "$timeout" ]; do
    if [ "$(health_of banyandb-stream)" = "healthy" ] && [ "$(health_of banyandb-trace)" = "healthy" ]; then
        echo -e "\nBoth containers are healthy!"
        exit 0
    fi

    echo -n "."
    sleep 1
    elapsed=$((elapsed + 1))
done

echo -e "\nTimeout waiting for containers to be healthy."
docker compose ps
exit 1
\ No newline at end of file


Reply via email to