This is an automated email from the ASF dual-hosted git repository.

wusheng pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/skywalking-banyandb-helm.git


The following commit(s) were added to refs/heads/master by this push:
     new e0d75d0  StatefulSet improvements  (#34)
e0d75d0 is described below

commit e0d75d0e62f7e91b4ba7dfe3cfde842c28288127
Author: Gao Hongtao <hanahm...@gmail.com>
AuthorDate: Thu Aug 14 09:44:36 2025 +0800

    StatefulSet improvements  (#34)
    
    * feat: implement lifecycle sidecar and StatefulSet improvements for 0.5.0
    
    - Add lifecycle sidecar support with configurable schedules per data node 
role
    - Convert liaison from Deployment to StatefulSet with headless services
    - Implement component-based storage configuration for data, liaison, and 
standalone
    - Add internal-grpc port 18912 for liaison pod-to-pod communication
    - Enable etcd defragmentation with daily scheduling
    - Add volume permissions init containers and enhance pod hostname 
configuration
    - Update e2e test configurations and documentation
    
    * feat: add new E2E test configuration for BanyanDB lifecycle
    
    - Introduced a new E2E test configuration for BanyanDB lifecycle, enhancing 
testing coverage for the lifecycle management of the database.
---
 .github/workflows/e2e.ci.yaml                      |   2 +
 CHANGES.md                                         |  16 +-
 chart/Chart.lock                                   |   2 +-
 chart/templates/cluster_data_statefulset.yaml      | 107 +++++--
 .../cluster_liaison_headless_service.yaml          |   6 -
 chart/templates/cluster_liaison_statefulset.yaml   |  56 +++-
 chart/templates/standalone_statefulset.yaml        |  50 ++-
 chart/values-lifecycle.yaml                        |  55 +++-
 chart/values.yaml                                  |  47 ++-
 doc/backup.md                                      |  16 +-
 doc/parameters.md                                  | 225 +++++++-------
 test/e2e/e2e-banyandb-cluster.yaml                 |   5 +
 ...standalone.yaml => e2e-banyandb-lifecycle.yaml} |  11 +-
 test/e2e/e2e-banyandb-standalone.yaml              |   5 +
 test/e2e/values.cluster.yaml                       |  38 ++-
 .../{values.cluster.yaml => values.lifecycle.yaml} | 340 +++++++++------------
 test/e2e/values.standalone.yaml                    |  10 +-
 17 files changed, 611 insertions(+), 380 deletions(-)

diff --git a/.github/workflows/e2e.ci.yaml b/.github/workflows/e2e.ci.yaml
index 8ef3d06..e11bf3a 100644
--- a/.github/workflows/e2e.ci.yaml
+++ b/.github/workflows/e2e.ci.yaml
@@ -39,6 +39,8 @@ jobs:
             config: test/e2e/e2e-banyandb-standalone.yaml
           - name: Run Skywalking E2E Test (BanyanDB cluster as database)
             config: test/e2e/e2e-banyandb-cluster.yaml
+          - name: Run Skywalking E2E Test (BanyanDB lifecycle as database)
+            config: test/e2e/e2e-banyandb-lifecycle.yaml
     name: ${{ matrix.test.name }}
     env:
       OAP_TAG: bf04afdb2a841c60d5e27f5a9fc62d0879a5600c
diff --git a/CHANGES.md b/CHANGES.md
index 710e649..8015e14 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -7,13 +7,15 @@ Release Notes.
 
 #### Features
 
-- Support Lifecycle Sidecar
-- Introduce the data node template to support different node roles
-- Convert liaison component from Deployment to StatefulSet 
-- Implement component-based storage configuration with separate data, liaison, 
and standalone sections. Enable the external data and liaison stroage by 
default.
-- Add headless services for StatefulSet pod discovery and stable network 
identities
-- Add internal-grpc port 18912 for liaison pod-to-pod communication
-- Enable etcd defragmentation by default with daily scheduling
+- Support Lifecycle Sidecar for automated data management across hot/warm/cold 
node roles with configurable schedules
+- Introduce the data node template system to support different node roles 
(hot, warm, cold) with role-specific configurations
+- Convert liaison component from Deployment to StatefulSet for improved state 
management and stable network identities
+- Implement component-based storage configuration with separate data, liaison, 
and standalone sections. Enable external data and liaison storage by default 
with persistent volume claims
+- Add headless services for StatefulSet pod discovery and stable network 
identities, enabling reliable pod-to-pod communication
+- Add internal-grpc port 18912 for liaison pod-to-pod communication, enhancing 
cluster internal networking
+- Enable etcd defragmentation by default with daily scheduling (0 0 * * *) to 
maintain optimal etcd performance
+- Enhance pod hostname configuration using headless services for improved 
service discovery and networking
+- Implement volume permissions init containers for proper file ownership and 
permissions on mounted volumes
 
 #### Chores
 
diff --git a/chart/Chart.lock b/chart/Chart.lock
index 6cc4dc8..c2e24be 100644
--- a/chart/Chart.lock
+++ b/chart/Chart.lock
@@ -3,4 +3,4 @@ dependencies:
   repository: oci://registry-1.docker.io/bitnamicharts
   version: 12.0.4
 digest: sha256:89059bc1e608d19e843cadb665bf6467858908eaea4d8f382b5046cdbc901f51
-generated: "2025-06-13T13:50:34.571368939Z"
+generated: "2025-08-12T03:26:24.777446502Z"
diff --git a/chart/templates/cluster_data_statefulset.yaml 
b/chart/templates/cluster_data_statefulset.yaml
index b0a2289..3fc3163 100644
--- a/chart/templates/cluster_data_statefulset.yaml
+++ b/chart/templates/cluster_data_statefulset.yaml
@@ -56,6 +56,41 @@ spec:
       {{- end }}
       priorityClassName: {{ $roleConfig.priorityClassName }}
       initContainers:
+        {{- if $roleConfig.volumePermissions.enabled }}
+        - name: volume-permissions
+          image: {{ default "busybox:1.36" $roleConfig.volumePermissions.image 
}}
+          imagePullPolicy: IfNotPresent
+          securityContext:
+            runAsUser: 0
+          command:
+            - sh
+            - -c
+            - |
+              set -euo pipefail
+              {{- $chownUser := default 1000 
$roleConfig.volumePermissions.chownUser -}}
+              {{- $chownGroup := default 1000 
$roleConfig.volumePermissions.chownGroup -}}
+              {{- if $.Values.storage.data.enabled }}
+              {{- range $claim := $.Values.storage.data.persistentVolumeClaims 
}}
+              {{- if eq $claim.nodeRole $roleName }}
+              {{- $mountName := (ternary $claim.existingClaimName 
$claim.claimName (not (empty $claim.existingClaimName))) }}
+              {{- range $mt := $claim.mountTargets }}
+              mkdir -p /mnt/{{ $mountName }}/{{ $mt }}
+              chown -R {{ $chownUser }}:{{ $chownGroup }} /mnt/{{ $mountName 
}}/{{ $mt }}
+              chmod -R 0770 /mnt/{{ $mountName }}/{{ $mt }}
+              {{- end }}
+              {{- end }}
+              {{- end }}
+              {{- end }}
+          {{- if $.Values.storage.data.enabled }}
+          volumeMounts:
+            {{- range $claim := $.Values.storage.data.persistentVolumeClaims }}
+            {{- if eq $claim.nodeRole $roleName }}
+            - mountPath: /mnt/{{ ternary $claim.existingClaimName 
$claim.claimName (not (empty $claim.existingClaimName)) }}
+              name: {{ ternary $claim.existingClaimName $claim.claimName (not 
(empty $claim.existingClaimName)) }}
+            {{- end }}
+            {{- end }}
+          {{- end }}
+        {{- end }}
         {{- if $roleConfig.restoreInitContainer.enabled }}
         - name: restore-init
           image: {{ $.Values.image.repository }}:{{ required 
"banyandb.image.tag is required" $.Values.image.tag }}-slim
@@ -64,19 +99,24 @@ spec:
             - "/restore"
             - "run"
             - "--source={{ $roleConfig.backupSidecar.dest }}"
+            {{- if $roleConfig.restoreInitContainer.customFlags }}
+            {{- range $flag := $roleConfig.restoreInitContainer.customFlags }}
+            - {{ $flag | quote }}
+            {{- end }}
+            {{- end }}
           {{- if $.Values.storage.data.enabled }}
           volumeMounts:
             {{- range $claim := $.Values.storage.data.persistentVolumeClaims }}
             {{- if eq $claim.nodeRole $roleName }}
             {{- if $claim.existingClaimName }}
             {{- range $claim.mountTargets }}
-            - mountPath: /data/{{ . }}
+            - mountPath: /tmp/{{ . }}
               name: {{ $claim.existingClaimName }}
               subPath: {{ . }}
             {{- end }}
             {{- else }}
             {{- range $claim.mountTargets }}
-            - mountPath: /data/{{ . }}
+            - mountPath: /tmp/{{ . }}
               name: {{ $claim.claimName }}
               subPath: {{ . }}
             {{- end }}
@@ -93,19 +133,25 @@ spec:
         - name: data
           image: {{ $.Values.image.repository }}:{{ required 
"banyandb.image.tag is required" $.Values.image.tag }}-slim
           imagePullPolicy: {{ $.Values.image.pullPolicy }}
+          {{- with $roleConfig.containerSecurityContext }}
+          securityContext:
+            {{- toYaml . | nindent 12 }}
+          {{- end }}
           env:
-            # Hostname configuration for headless service
-            - name: BYDB_NODE_HOST_PROVIDER
-              value: "hostname"
-            - name: BYDB_NODE_HOST
+            # Pod metadata for hostname construction
+            - name: POD_NAME
               valueFrom:
                 fieldRef:
                   fieldPath: metadata.name
-            {{- $mergedEnv := concat $.Values.cluster.data.nodeTemplate.env 
$roleConfig.env }}
-            {{- range $env := $mergedEnv }}
-            - name: {{ $env.name }}
-              value: {{ $env.value }}
-            {{- end }}
+            - name: POD_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            # Hostname configuration for headless service
+            - name: BYDB_NODE_HOST_PROVIDER
+              value: "flag"
+            - name: BYDB_NODE_HOST
+              value: "$(POD_NAME).{{ template "banyandb.fullname" $ }}-data-{{ 
$roleName }}-headless.$(POD_NAMESPACE)"
             - name: BYDB_NODE_LABELS
               value: "type={{ $roleName }}"
             {{- if $roleConfig.tls}}
@@ -142,12 +188,12 @@ spec:
             {{- else }}
             {{- include "banyandb.etcdEndpoints" $ | nindent 12 }}
             {{- end }}
-            - name: BYDB_NODE_HOST_PROVIDER
-              value: "ip"
-            - name: BYDB_NODE_HOST
-              valueFrom:
-                fieldRef:
-                  fieldPath: status.podIP
+            {{- $mergedEnv := concat $.Values.cluster.data.nodeTemplate.env 
$roleConfig.env }}
+            {{- range $env := $mergedEnv }}
+            - name: {{ $env.name }}
+              value: {{ $env.value }}
+            {{- end }}
+           
           args:
             - data 
           ports:
@@ -205,20 +251,20 @@ spec:
             {{- end }}
           {{- end }}
 
-          {{- if or $.Values.storage.enabled $roleConfig.tls }}
+          {{- if or $.Values.storage.data.enabled $roleConfig.tls }}
           volumeMounts:
-            {{- if $.Values.storage.enabled }}
-            {{- range $claim := $.Values.storage.persistentVolumeClaims }}
+            {{- if $.Values.storage.data.enabled }}
+            {{- range $claim := $.Values.storage.data.persistentVolumeClaims }}
             {{- if eq $claim.nodeRole $roleName }}
             {{- if $claim.existingClaimName }}
             {{- range $claim.mountTargets }}
-            - mountPath: /data/{{ . }}
+            - mountPath: /tmp/{{ . }}
               name: {{ $claim.existingClaimName }}
               subPath: {{ . }}
             {{- end }}
             {{- else }}
             {{- range $claim.mountTargets }}
-            - mountPath: /data/{{ . }}
+            - mountPath: /tmp/{{ . }}
               name: {{ $claim.claimName }}
               subPath: {{ . }}
             {{- end }}
@@ -248,6 +294,11 @@ spec:
             - "--time-style={{ $roleConfig.backupSidecar.timeStyle }}"
             - "--schedule={{ $roleConfig.backupSidecar.schedule }}"
             - "--grpc-addr=127.0.0.1:17912"
+            {{- if $roleConfig.backupSidecar.customFlags }}
+            {{- range $flag := $roleConfig.backupSidecar.customFlags }}
+            - {{ $flag | quote }}
+            {{- end }}
+            {{- end }}
           env:
             - name: ORDINAL_NUMBER
               valueFrom:
@@ -259,13 +310,13 @@ spec:
             {{- if eq $claim.nodeRole $roleName }}
             {{- if $claim.existingClaimName }}
             {{- range $claim.mountTargets }}
-            - mountPath: /data/{{ . }}
+            - mountPath: /tmp/{{ . }}
               name: {{ $claim.existingClaimName }}
               subPath: {{ . }}
             {{- end }}
             {{- else }}
             {{- range $claim.mountTargets }}
-            - mountPath: /data/{{ . }}
+            - mountPath: /tmp/{{ . }}
               name: {{ $claim.claimName }}
               subPath: {{ . }}
             {{- end }}
@@ -316,13 +367,13 @@ spec:
             {{- if eq $claim.nodeRole $roleName }}
             {{- if $claim.existingClaimName }}
             {{- range $claim.mountTargets }}
-            - mountPath: /data/{{ . }}
+            - mountPath: /tmp/{{ . }}
               name: {{ $claim.existingClaimName }}
               subPath: {{ . }}
             {{- end }}
             {{- else }}
             {{- range $claim.mountTargets }}
-            - mountPath: /data/{{ . }}
+            - mountPath: /tmp/{{ . }}
               name: {{ $claim.claimName }}
               subPath: {{ . }}
             {{- end }}
@@ -356,13 +407,13 @@ spec:
             {{- if eq $claim.nodeRole $roleName }}
             {{- if $claim.existingClaimName }}
             {{- range $claim.mountTargets }}
-            - mountPath: /data/{{ . }}
+            - mountPath: /tmp/{{ . }}
               name: {{ $claim.existingClaimName }}
               subPath: {{ . }}
             {{- end }}
             {{- else }}
             {{- range $claim.mountTargets }}
-            - mountPath: /data/{{ . }}
+            - mountPath: /tmp/{{ . }}
               name: {{ $claim.claimName }}
               subPath: {{ . }}
             {{- end }}
diff --git a/chart/templates/cluster_liaison_headless_service.yaml 
b/chart/templates/cluster_liaison_headless_service.yaml
index ff43e8c..6f67f71 100644
--- a/chart/templates/cluster_liaison_headless_service.yaml
+++ b/chart/templates/cluster_liaison_headless_service.yaml
@@ -26,12 +26,6 @@ spec:
   clusterIP: None
   publishNotReadyAddresses: true
   ports:
-    - port: {{ .Values.cluster.liaison.grpcSvc.port }}
-      name: grpc
-      targetPort: grpc
-    - port: {{ .Values.cluster.liaison.httpSvc.port }}
-      name: http
-      targetPort: http
     - port: 18912
       name: internal-grpc
       targetPort: internal-grpc
diff --git a/chart/templates/cluster_liaison_statefulset.yaml 
b/chart/templates/cluster_liaison_statefulset.yaml
index e3f97ba..0c1ad4e 100644
--- a/chart/templates/cluster_liaison_statefulset.yaml
+++ b/chart/templates/cluster_liaison_statefulset.yaml
@@ -45,6 +45,33 @@ spec:
         {{- toYaml . | nindent 8 }}
       {{- end }}
       priorityClassName: {{ .Values.cluster.liaison.priorityClassName }}
+      {{- if and .Values.storage.liaison.enabled 
.Values.cluster.liaison.volumePermissions.enabled }}
+      initContainers:
+        - name: volume-permissions
+          image: {{ default "busybox:1.36" 
.Values.cluster.liaison.volumePermissions.image }}
+          imagePullPolicy: IfNotPresent
+          securityContext:
+            runAsUser: 0
+          command:
+            - sh
+            - -c
+            - |
+              set -euo pipefail
+              CHOWN_USER={{ default 1000 
.Values.cluster.liaison.volumePermissions.chownUser }}
+              CHOWN_GROUP={{ default 1000 
.Values.cluster.liaison.volumePermissions.chownGroup }}
+              {{- range $claim := 
.Values.storage.liaison.persistentVolumeClaims }}
+              {{- range $mt := $claim.mountTargets }}
+              mkdir -p /mnt/{{ $claim.claimName }}/{{ $mt }}
+              chown -R ${CHOWN_USER}:${CHOWN_GROUP} /mnt/{{ $claim.claimName 
}}/{{ $mt }}
+              chmod -R 0770 /mnt/{{ $claim.claimName }}/{{ $mt }}
+              {{- end }}
+              {{- end }}
+          volumeMounts:
+            {{- range $claim := .Values.storage.liaison.persistentVolumeClaims 
}}
+            - mountPath: /mnt/{{ $claim.claimName }}
+              name: {{ $claim.claimName }}
+            {{- end }}
+      {{- end }}
       containers:
         - name: liaison
           {{- if eq .Values.cluster.ui.type "Embedded"  }}
@@ -53,18 +80,25 @@ spec:
           image: {{ .Values.image.repository }}:{{ required 
"banyandb.image.tag is required" .Values.image.tag }}-slim
           {{- end }}
           imagePullPolicy: {{ .Values.image.pullPolicy }}
+          {{- with .Values.cluster.liaison.containerSecurityContext }}
+          securityContext:
+            {{- toYaml . | nindent 12 }}
+          {{- end }}
           env:
-            # Hostname configuration for headless service
-            - name: BYDB_NODE_HOST_PROVIDER
-              value: "hostname"
-            - name: BYDB_NODE_HOST
+            # Pod metadata for hostname construction
+            - name: POD_NAME
               valueFrom:
                 fieldRef:
                   fieldPath: metadata.name
-            {{- range $env := .Values.cluster.liaison.env }}
-            - name: {{ $env.name }}
-              value: {{ $env.value }}
-            {{- end }}
+            - name: POD_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            # Hostname configuration for headless service
+            - name: BYDB_NODE_HOST_PROVIDER
+              value: "flag"
+            - name: BYDB_NODE_HOST
+              value: "$(POD_NAME).{{ template "banyandb.fullname" . 
}}-liaison-headless.$(POD_NAMESPACE)"
             {{- if .Values.cluster.liaison.tls }}
             {{- if .Values.cluster.liaison.tls.grpcSecretName }}
             - name: BYDB_TLS
@@ -107,6 +141,10 @@ spec:
             {{- else }}
             {{- include "banyandb.etcdEndpoints" . | nindent 12 }}
             {{- end }}
+            {{- range $env := .Values.cluster.liaison.env }}
+            - name: {{ $env.name }}
+              value: {{ $env.value }}
+            {{- end }}
           args:
             - liaison
           ports:
@@ -183,7 +221,7 @@ spec:
             {{- if .Values.storage.liaison.enabled }}
             {{- range $claim := .Values.storage.liaison.persistentVolumeClaims 
}}
             {{- range $claim.mountTargets }}
-            - mountPath: /data/{{ . }}
+            - mountPath: /tmp/{{ . }}
               name: {{ $claim.claimName }}
               subPath: {{ . }}
             {{- end }}
diff --git a/chart/templates/standalone_statefulset.yaml 
b/chart/templates/standalone_statefulset.yaml
index 64094bf..ebfcdb1 100644
--- a/chart/templates/standalone_statefulset.yaml
+++ b/chart/templates/standalone_statefulset.yaml
@@ -43,22 +43,42 @@ spec:
         {{- toYaml . | nindent 8 }}
       {{- end }}
       priorityClassName: {{ .Values.standalone.priorityClassName }}
+      {{- if and .Values.storage.standalone.enabled 
.Values.standalone.volumePermissions.enabled }}
+      initContainers:
+        - name: volume-permissions
+          image: {{ default "busybox:1.36" 
.Values.standalone.volumePermissions.image }}
+          imagePullPolicy: IfNotPresent
+          securityContext:
+            runAsUser: 0
+          command:
+            - sh
+            - -c
+            - |
+              set -euo pipefail
+              CHOWN_USER={{ default 1000 
.Values.standalone.volumePermissions.chownUser }}
+              CHOWN_GROUP={{ default 1000 
.Values.standalone.volumePermissions.chownGroup }}
+              {{- range $claim := 
.Values.storage.standalone.persistentVolumeClaims }}
+              {{- range $mt := $claim.mountTargets }}
+              mkdir -p /mnt/{{ $claim.claimName }}/{{ $mt }}
+              chown -R ${CHOWN_USER}:${CHOWN_GROUP} /mnt/{{ $claim.claimName 
}}/{{ $mt }}
+              chmod -R 0770 /mnt/{{ $claim.claimName }}/{{ $mt }}
+              {{- end }}
+              {{- end }}
+          volumeMounts:
+            {{- range $claim := 
.Values.storage.standalone.persistentVolumeClaims }}
+            - mountPath: /mnt/{{ $claim.claimName }}
+              name: {{ $claim.claimName }}
+            {{- end }}
+      {{- end }}
       containers:
         - name: standalone
           image: {{ .Values.image.repository }}:{{ required 
"banyandb.image.tag is required" .Values.image.tag }}
           imagePullPolicy: {{ .Values.image.pullPolicy }}
+          {{- with .Values.standalone.containerSecurityContext }}
+          securityContext:
+            {{- toYaml . | nindent 12 }}
+          {{- end }}
           env:
-            # Hostname configuration for headless service
-            - name: BYDB_NODE_HOST_PROVIDER
-              value: "hostname"
-            - name: BYDB_NODE_HOST
-              valueFrom:
-                fieldRef:
-                  fieldPath: metadata.name
-            {{- range $env := .Values.standalone.env }}
-            - name: {{ $env.name }}
-              value: {{ $env.value }}
-            {{- end }}
             {{- if .Values.standalone.tls}}
             {{- if .Values.standalone.tls.grpcSecretName }}
             - name: BYDB_TLS
@@ -79,6 +99,10 @@ spec:
               value: "/etc/tls/{{ .Values.standalone.tls.httpSecretName 
}}/tls.key"
             {{- end }}
             {{- end }}
+            {{- range $env := .Values.standalone.env }}
+            - name: {{ $env.name }}
+              value: {{ $env.value }}
+            {{- end }}
           args:
             - standalone
           ports:
@@ -154,13 +178,13 @@ spec:
             {{- range $claim := 
.Values.storage.standalone.persistentVolumeClaims }}
             {{- if $claim.existingClaimName }}
             {{- range $claim.mountTargets }}
-            - mountPath: /data/{{ . }}
+            - mountPath: /tmp/{{ . }}
               name: {{ $claim.existingClaimName }}
               subPath: {{ . }}
             {{- end }}
             {{- else }}
             {{- range $claim.mountTargets }}
-            - mountPath: /data/{{ . }}
+            - mountPath: /tmp/{{ . }}
               name: {{ $claim.claimName }}
               subPath: {{ . }}
             {{- end }}
diff --git a/chart/values-lifecycle.yaml b/chart/values-lifecycle.yaml
index 21902e2..b801608 100644
--- a/chart/values-lifecycle.yaml
+++ b/chart/values-lifecycle.yaml
@@ -52,6 +52,19 @@ standalone:
   ## @param standalone.securityContext Security context for the pod
   ##
   securityContext: {}
+  ## @param standalone.containerSecurityContext Container-level security 
context
+  ## e.g. { readOnlyRootFilesystem: true, allowPrivilegeEscalation: false, 
runAsNonRoot: true }
+  containerSecurityContext: {}
+  ## Volume permissions init container
+  ## @param standalone.volumePermissions.enabled Enable volume permissions 
init container
+  volumePermissions:
+    enabled: false
+    ## @param standalone.volumePermissions.chownUser User ID to chown the 
mounted volumes
+    chownUser: 1000
+    ## @param standalone.volumePermissions.chownGroup Group ID to chown the 
mounted volumes
+    chownGroup: 1000
+    ## @param standalone.volumePermissions.image Image for the volume 
permissions init container
+    image: busybox:1.36
   ## @param standalone.env Environment variables for the pod
   ##
   env: []
@@ -206,9 +219,24 @@ cluster:
     ## @param cluster.liaison.securityContext Security context for liaison pods
     ##
     securityContext: {}
+    ## @param cluster.liaison.containerSecurityContext Container-level 
security context for liaison
+    ## e.g. { readOnlyRootFilesystem: true, allowPrivilegeEscalation: false, 
runAsNonRoot: true }
+    containerSecurityContext: {}
+    ## Volume permissions init container
+    ## @param cluster.liaison.volumePermissions.enabled Enable volume 
permissions init container for liaison
+    volumePermissions:
+      enabled: false
+      ## @param cluster.liaison.volumePermissions.chownUser User ID to chown 
the mounted volumes for liaison
+      chownUser: 1000
+      ## @param cluster.liaison.volumePermissions.chownGroup Group ID to chown 
the mounted volumes for liaison
+      chownGroup: 1000
+      ## @param cluster.liaison.volumePermissions.image Image for the volume 
permissions init container for liaison
+      image: busybox:1.36
     ## @param cluster.liaison.env Environment variables for liaison pods
     ##
-    env: []
+    env:
+      - name: BYDB_DATA_NODE_SELECTOR
+        value: "type=hot"
     ## @param cluster.liaison.priorityClassName Priority class name for 
liaison pods
     ##
     priorityClassName: ""
@@ -351,6 +379,19 @@ cluster:
       ## @param cluster.data.nodeTemplate.securityContext Security context for 
data pods
       ##
       securityContext: {}
+      ## @param cluster.data.nodeTemplate.containerSecurityContext 
Container-level security context for data pods
+      ## e.g. { readOnlyRootFilesystem: true, allowPrivilegeEscalation: false, 
runAsNonRoot: true }
+      containerSecurityContext: {}
+      ## Volume permissions init container
+      ## @param cluster.data.nodeTemplate.volumePermissions.enabled Enable 
volume permissions init container for data pods
+      volumePermissions:
+        enabled: false
+        ## @param cluster.data.nodeTemplate.volumePermissions.chownUser User 
ID to chown the mounted volumes for data pods
+        chownUser: 1000
+        ## @param cluster.data.nodeTemplate.volumePermissions.chownGroup Group 
ID to chown the mounted volumes for data pods
+        chownGroup: 1000
+        ## @param cluster.data.nodeTemplate.volumePermissions.image Image for 
the volume permissions init container for data pods
+        image: busybox:1.36
       ## @param cluster.data.nodeTemplate.env Environment variables for data 
pods
       ##
       env: []
@@ -416,6 +457,9 @@ cluster:
         timeStyle: "daily"
         ## @param cluster.data.nodeTemplate.backupSidecar.schedule Backup 
schedule for data pods (cron format)
         schedule: "@hourly"
+        ## @param cluster.data.nodeTemplate.backupSidecar.customFlags Custom 
flags for backup sidecar (e.g., S3, Azure, GCS configuration)
+        ##
+        customFlags: []
         ##
         ## @param cluster.data.nodeTemplate.backupSidecar.resources Resources 
for backup sidecar for data pods
         ##
@@ -438,6 +482,9 @@ cluster:
         ## @param cluster.data.nodeTemplate.restoreInitContainer.enabled 
Enable restore init container for data pods (boolean)
         ##
         enabled: false
+        ## @param cluster.data.nodeTemplate.restoreInitContainer.customFlags 
Custom flags for restore init container (e.g., S3, Azure, GCS configuration)
+        ##
+        customFlags: []
         ## @param cluster.data.nodeTemplate.restoreInitContainer.resources 
Resources for restore init container for data pods
         ##
         resources: {}
@@ -499,18 +546,24 @@ cluster:
     ## @extra cluster.data.roles List of data roles (hot, warm, cold)
     ##
     roles:
+      ## @param cluster.data.roles.hot Hot data role
+      ##
       hot:
         ## Override lifecycle sidecar settings for hot nodes
         lifecycleSidecar:
           ## @param cluster.data.roles.hot.lifecycleSidecar.schedule Schedule 
for lifecycle sidecar for hot data pods
           schedule: "@daily"
           enabled: true
+      ## @param cluster.data.roles.warm Warm data role
+      ##
       warm:
         ## Override lifecycle sidecar settings for warm nodes
         lifecycleSidecar:
           ## @param cluster.data.roles.warm.lifecycleSidecar.schedule Schedule 
for lifecycle sidecar for warm data pods
           schedule: "@daily"
           enabled: true
+      ## @param cluster.data.roles.cold Cold data role
+      ##
       cold:
         ## @param cluster.data.roles.cold.replicas Override number of cold 
data replicas
         replicas: 1
diff --git a/chart/values.yaml b/chart/values.yaml
index 4e08d5a..2d91078 100644
--- a/chart/values.yaml
+++ b/chart/values.yaml
@@ -52,6 +52,19 @@ standalone:
   ## @param standalone.securityContext Security context for the pod
   ##
   securityContext: {}
+  ## @param standalone.containerSecurityContext Container-level security 
context
+  ## e.g. { readOnlyRootFilesystem: true, allowPrivilegeEscalation: false, 
runAsNonRoot: true }
+  containerSecurityContext: {}
+  ## Volume permissions init container
+  ## @param standalone.volumePermissions.enabled Enable volume permissions 
init container
+  volumePermissions:
+    enabled: false
+    ## @param standalone.volumePermissions.chownUser User ID to chown the 
mounted volumes
+    chownUser: 1000
+    ## @param standalone.volumePermissions.chownGroup Group ID to chown the 
mounted volumes
+    chownGroup: 1000
+    ## @param standalone.volumePermissions.image Image for the volume 
permissions init container
+    image: busybox:1.36
   ## @param standalone.env Environment variables for the pod
   ##
   env: []
@@ -206,6 +219,19 @@ cluster:
     ## @param cluster.liaison.securityContext Security context for liaison pods
     ##
     securityContext: {}
+    ## @param cluster.liaison.containerSecurityContext Container-level 
security context for liaison
+    ## e.g. { readOnlyRootFilesystem: true, allowPrivilegeEscalation: false, 
runAsNonRoot: true }
+    containerSecurityContext: {}
+    ## Volume permissions init container
+    ## @param cluster.liaison.volumePermissions.enabled Enable volume 
permissions init container for liaison
+    volumePermissions:
+      enabled: false
+      ## @param cluster.liaison.volumePermissions.chownUser User ID to chown 
the mounted volumes for liaison
+      chownUser: 1000
+      ## @param cluster.liaison.volumePermissions.chownGroup Group ID to chown 
the mounted volumes for liaison
+      chownGroup: 1000
+      ## @param cluster.liaison.volumePermissions.image Image for the volume 
permissions init container for liaison
+      image: busybox:1.36
     ## @param cluster.liaison.env Environment variables for liaison pods
     ##
     env: []
@@ -351,6 +377,19 @@ cluster:
       ## @param cluster.data.nodeTemplate.securityContext Security context for 
data pods
       ##
       securityContext: {}
+      ## @param cluster.data.nodeTemplate.containerSecurityContext 
Container-level security context for data pods
+      ## e.g. { readOnlyRootFilesystem: true, allowPrivilegeEscalation: false, 
runAsNonRoot: true }
+      containerSecurityContext: {}
+      ## Volume permissions init container
+      ## @param cluster.data.nodeTemplate.volumePermissions.enabled Enable 
volume permissions init container for data pods
+      volumePermissions:
+        enabled: false
+        ## @param cluster.data.nodeTemplate.volumePermissions.chownUser User 
ID to chown the mounted volumes for data pods
+        chownUser: 1000
+        ## @param cluster.data.nodeTemplate.volumePermissions.chownGroup Group 
ID to chown the mounted volumes for data pods
+        chownGroup: 1000
+        ## @param cluster.data.nodeTemplate.volumePermissions.image Image for 
the volume permissions init container for data pods
+        image: busybox:1.36
       ## @param cluster.data.nodeTemplate.env Environment variables for data 
pods
       ##
       env: []
@@ -416,6 +455,9 @@ cluster:
         timeStyle: "daily"
         ## @param cluster.data.nodeTemplate.backupSidecar.schedule Backup 
schedule for data pods (cron format)
         schedule: "@hourly"
+        ## @param cluster.data.nodeTemplate.backupSidecar.customFlags Custom 
flags for backup sidecar (e.g., S3, Azure, GCS configuration)
+        ##
+        customFlags: []
         ##
         ## @param cluster.data.nodeTemplate.backupSidecar.resources Resources 
for backup sidecar for data pods
         ##
@@ -438,6 +480,9 @@ cluster:
         ## @param cluster.data.nodeTemplate.restoreInitContainer.enabled 
Enable restore init container for data pods (boolean)
         ##
         enabled: false
+        ## @param cluster.data.nodeTemplate.restoreInitContainer.customFlags 
Custom flags for restore init container (e.g., S3, Azure, GCS configuration)
+        ##
+        customFlags: []
         ## @param cluster.data.nodeTemplate.restoreInitContainer.resources 
Resources for restore init container for data pods
         ##
         resources: {}
@@ -745,7 +790,7 @@ storage:
     persistentVolumeClaims:
     ## @param storage.standalone.persistentVolumeClaims[0].mountTargets Mount 
targets for the PVC
     ##
-    - mountTargets: [ "measure", "stream", "property" ]
+    - mountTargets: [ "measure", "stream", "metadata", "property" ]
       ## @param storage.standalone.persistentVolumeClaims[0].claimName Name of 
the PVC
       claimName: standalone-data
       ## @param storage.standalone.persistentVolumeClaims[0].size Size of the 
PVC
diff --git a/doc/backup.md b/doc/backup.md
index 66edc15..494278c 100644
--- a/doc/backup.md
+++ b/doc/backup.md
@@ -51,7 +51,7 @@ To enable restore:
           command: [ "--source=file:///backups" ]
   ```
 
-- Ensure that the backup, restore, and main containers share the required 
volumes (e.g., for `/data/stream`, `/data/measure`, and `/data/property`). This 
is typically configured via the Kubernetes volume definitions in the 
StatefulSet.
+- Ensure that the backup, restore, and main containers share the required 
volumes (e.g., for `/tmp/stream`, `/tmp/measure`, and `/tmp/property`). This is 
typically configured via the Kubernetes volume definitions in the StatefulSet.
 
 ## 3. Shared Volumes
 
@@ -92,18 +92,18 @@ Before triggering a restore, you may need to create and 
verify timedir marker fi
 
 ```sh
 restore timedir create 2025-02-12 \
-  --stream-root /data \
-  --measure-root /data \
-  --property-root /data
+  --stream-root /tmp \
+  --measure-root /tmp \
+  --property-root /tmp
 ```
 
 #### Verify Timedir Files
 
 ```sh
 restore timedir read \
-  --stream-root /data \
-  --measure-root /data \
-  --property-root /data
+  --stream-root /tmp \
+  --measure-root /tmp \
+  --property-root /tmp
 ```
 
 ### Triggering the Restore
@@ -120,7 +120,7 @@ Follow these steps to restore data:
    ```
 
 3. The new pod will start, and the init container will perform the restore 
process by:
-   - Reading the timedir marker files (e.g., `/data/stream/time-dir`).
+   - Reading the timedir marker files (e.g., `/tmp/stream/time-dir`).
    - Comparing local data with the remote backup snapshot.
    - Removing orphaned files and deleting the marker files upon successful 
restoration.
 
diff --git a/doc/parameters.md b/doc/parameters.md
index aaa9f96..22e7b8d 100644
--- a/doc/parameters.md
+++ b/doc/parameters.md
@@ -26,6 +26,11 @@ The content of this document describes the parameters that 
can be configured in
 | `standalone.enabled`                            | Enable standalone mode 
(boolean)                        | `false`        |
 | `standalone.podAnnotations`                     | Additional pod annotations 
                             | `{}`           |
 | `standalone.securityContext`                    | Security context for the 
pod                            | `{}`           |
+| `standalone.containerSecurityContext`           | Container-level security 
context                        | `{}`           |
+| `standalone.volumePermissions.enabled`          | Enable volume permissions 
init container                | `false`        |
+| `standalone.volumePermissions.chownUser`        | User ID to chown the 
mounted volumes                    | `1000`         |
+| `standalone.volumePermissions.chownGroup`       | Group ID to chown the 
mounted volumes                   | `1000`         |
+| `standalone.volumePermissions.image`            | Image for the volume 
permissions init container         | `busybox:1.36` |
 | `standalone.env`                                | Environment variables for 
the pod                       | `[]`           |
 | `standalone.priorityClassName`                  | Priority class name for 
the pod                         | `""`           |
 | `standalone.podDisruptionBudget`                | Pod disruption budget 
configuration                     | `{}`           |
@@ -74,103 +79,115 @@ The content of this document describes the parameters 
that can be configured in
 
 ### Configuration for liaison component
 
-| Name                                                          | Description  
                                  | Value           |
-| ------------------------------------------------------------- | 
---------------------------------------------- | --------------- |
-| `cluster.liaison.replicas`                                    | Number of 
liaison replicas                     | `2`             |
-| `cluster.liaison.podAnnotations`                              | Pod 
annotations for liaison                    | `{}`            |
-| `cluster.liaison.securityContext`                             | Security 
context for liaison pods              | `{}`            |
-| `cluster.liaison.env`                                         | Environment 
variables for liaison pods         | `[]`            |
-| `cluster.liaison.priorityClassName`                           | Priority 
class name for liaison pods           | `""`            |
-| `cluster.liaison.updateStrategy.type`                         | Update 
strategy type for liaison pods          | `RollingUpdate` |
-| `cluster.liaison.updateStrategy.rollingUpdate.maxUnavailable` | Maximum 
unavailable pods during update         | `1`             |
-| `cluster.liaison.podManagementPolicy`                         | Pod 
management policy for liaison StatefulSet  | `Parallel`      |
-| `cluster.liaison.podDisruptionBudget`                         | Pod 
disruption budget for liaison              | `{}`            |
-| `cluster.liaison.tolerations`                                 | Tolerations 
for liaison pods                   | `[]`            |
-| `cluster.liaison.nodeSelector`                                | Node 
selector for liaison pods                 | `[]`            |
-| `cluster.liaison.affinity`                                    | Affinity 
rules for liaison pods                | `{}`            |
-| `cluster.liaison.podAffinityPreset`                           | Pod affinity 
preset for liaison                | `""`            |
-| `cluster.liaison.podAntiAffinityPreset`                       | Pod 
anti-affinity preset for liaison           | `soft`          |
-| `cluster.liaison.resources.requests`                          | Resource 
requests for liaison pods             | `[]`            |
-| `cluster.liaison.resources.limits`                            | Resource 
limits for liaison pods               | `[]`            |
-| `cluster.liaison.grpcSvc.labels`                              | Labels for 
GRPC service for liaison            | `{}`            |
-| `cluster.liaison.grpcSvc.annotations`                         | Annotations 
for GRPC service for liaison       | `{}`            |
-| `cluster.liaison.grpcSvc.port`                                | Port number 
for GRPC service for liaison       | `17912`         |
-| `cluster.liaison.httpSvc.labels`                              | Labels for 
HTTP service for liaison            | `{}`            |
-| `cluster.liaison.httpSvc.annotations`                         | Annotations 
for HTTP service for liaison       | `{}`            |
-| `cluster.liaison.httpSvc.port`                                | Port number 
for HTTP service for liaison       | `17913`         |
-| `cluster.liaison.httpSvc.type`                                | Service type 
for HTTP service for liaison      | `LoadBalancer`  |
-| `cluster.liaison.httpSvc.externalIPs`                         | External IP 
addresses for liaison HTTP service | `[]`            |
-| `cluster.liaison.httpSvc.loadBalancerIP`                      | Load 
balancer IP for liaison HTTP service      | `nil`           |
-| `cluster.liaison.httpSvc.loadBalancerSourceRanges`            | Allowed 
source ranges for liaison HTTP service | `[]`            |
-| `cluster.liaison.ingress.enabled`                             | Enable 
ingress for liaison                     | `false`         |
-| `cluster.liaison.ingress.labels`                              | Labels for 
ingress of liaison                  | `{}`            |
-| `cluster.liaison.ingress.annotations`                         | Annotations 
for ingress of liaison             | `{}`            |
-| `cluster.liaison.ingress.rules`                               | Ingress 
rules for liaison                      | `[]`            |
-| `cluster.liaison.ingress.tls`                                 | TLS 
configuration for liaison ingress          | `[]`            |
-| `cluster.liaison.livenessProbe.initialDelaySeconds`           | Initial 
delay for liaison liveness probe       | `20`            |
-| `cluster.liaison.livenessProbe.periodSeconds`                 | Probe period 
for liaison liveness probe        | `30`            |
-| `cluster.liaison.livenessProbe.timeoutSeconds`                | Timeout in 
seconds for liaison liveness probe  | `5`             |
-| `cluster.liaison.livenessProbe.successThreshold`              | Success 
threshold for liaison liveness probe   | `1`             |
-| `cluster.liaison.livenessProbe.failureThreshold`              | Failure 
threshold for liaison liveness probe   | `5`             |
-| `cluster.liaison.readinessProbe.initialDelaySeconds`          | Initial 
delay for liaison readiness probe      | `20`            |
-| `cluster.liaison.readinessProbe.periodSeconds`                | Probe period 
for liaison readiness probe       | `30`            |
-| `cluster.liaison.readinessProbe.timeoutSeconds`               | Timeout in 
seconds for liaison readiness probe | `5`             |
-| `cluster.liaison.readinessProbe.successThreshold`             | Success 
threshold for liaison readiness probe  | `1`             |
-| `cluster.liaison.readinessProbe.failureThreshold`             | Failure 
threshold for liaison readiness probe  | `5`             |
-| `cluster.liaison.startupProbe.initialDelaySeconds`            | Initial 
delay for liaison startup probe        | `0`             |
-| `cluster.liaison.startupProbe.periodSeconds`                  | Probe period 
for liaison startup probe         | `10`            |
-| `cluster.liaison.startupProbe.timeoutSeconds`                 | Timeout in 
seconds for liaison startup probe   | `5`             |
-| `cluster.liaison.startupProbe.successThreshold`               | Success 
threshold for liaison startup probe    | `1`             |
-| `cluster.liaison.startupProbe.failureThreshold`               | Failure 
threshold for liaison startup probe    | `60`            |
+| Name                                                          | Description  
                                               | Value           |
+| ------------------------------------------------------------- | 
----------------------------------------------------------- | --------------- |
+| `cluster.liaison.replicas`                                    | Number of 
liaison replicas                                  | `2`             |
+| `cluster.liaison.podAnnotations`                              | Pod 
annotations for liaison                                 | `{}`            |
+| `cluster.liaison.securityContext`                             | Security 
context for liaison pods                           | `{}`            |
+| `cluster.liaison.containerSecurityContext`                    | 
Container-level security context for liaison                | `{}`            |
+| `cluster.liaison.volumePermissions.enabled`                   | Enable 
volume permissions init container for liaison        | `false`         |
+| `cluster.liaison.volumePermissions.chownUser`                 | User ID to 
chown the mounted volumes for liaison            | `1000`          |
+| `cluster.liaison.volumePermissions.chownGroup`                | Group ID to 
chown the mounted volumes for liaison           | `1000`          |
+| `cluster.liaison.volumePermissions.image`                     | Image for 
the volume permissions init container for liaison | `busybox:1.36`  |
+| `cluster.liaison.env`                                         | Environment 
variables for liaison pods                      | `[]`            |
+| `cluster.liaison.priorityClassName`                           | Priority 
class name for liaison pods                        | `""`            |
+| `cluster.liaison.updateStrategy.type`                         | Update 
strategy type for liaison pods                       | `RollingUpdate` |
+| `cluster.liaison.updateStrategy.rollingUpdate.maxUnavailable` | Maximum 
unavailable pods during update                      | `1`             |
+| `cluster.liaison.podManagementPolicy`                         | Pod 
management policy for liaison StatefulSet               | `Parallel`      |
+| `cluster.liaison.podDisruptionBudget`                         | Pod 
disruption budget for liaison                           | `{}`            |
+| `cluster.liaison.tolerations`                                 | Tolerations 
for liaison pods                                | `[]`            |
+| `cluster.liaison.nodeSelector`                                | Node 
selector for liaison pods                              | `[]`            |
+| `cluster.liaison.affinity`                                    | Affinity 
rules for liaison pods                             | `{}`            |
+| `cluster.liaison.podAffinityPreset`                           | Pod affinity 
preset for liaison                             | `""`            |
+| `cluster.liaison.podAntiAffinityPreset`                       | Pod 
anti-affinity preset for liaison                        | `soft`          |
+| `cluster.liaison.resources.requests`                          | Resource 
requests for liaison pods                          | `[]`            |
+| `cluster.liaison.resources.limits`                            | Resource 
limits for liaison pods                            | `[]`            |
+| `cluster.liaison.grpcSvc.labels`                              | Labels for 
GRPC service for liaison                         | `{}`            |
+| `cluster.liaison.grpcSvc.annotations`                         | Annotations 
for GRPC service for liaison                    | `{}`            |
+| `cluster.liaison.grpcSvc.port`                                | Port number 
for GRPC service for liaison                    | `17912`         |
+| `cluster.liaison.httpSvc.labels`                              | Labels for 
HTTP service for liaison                         | `{}`            |
+| `cluster.liaison.httpSvc.annotations`                         | Annotations 
for HTTP service for liaison                    | `{}`            |
+| `cluster.liaison.httpSvc.port`                                | Port number 
for HTTP service for liaison                    | `17913`         |
+| `cluster.liaison.httpSvc.type`                                | Service type 
for HTTP service for liaison                   | `LoadBalancer`  |
+| `cluster.liaison.httpSvc.externalIPs`                         | External IP 
addresses for liaison HTTP service              | `[]`            |
+| `cluster.liaison.httpSvc.loadBalancerIP`                      | Load 
balancer IP for liaison HTTP service                   | `nil`           |
+| `cluster.liaison.httpSvc.loadBalancerSourceRanges`            | Allowed 
source ranges for liaison HTTP service              | `[]`            |
+| `cluster.liaison.ingress.enabled`                             | Enable 
ingress for liaison                                  | `false`         |
+| `cluster.liaison.ingress.labels`                              | Labels for 
ingress of liaison                               | `{}`            |
+| `cluster.liaison.ingress.annotations`                         | Annotations 
for ingress of liaison                          | `{}`            |
+| `cluster.liaison.ingress.rules`                               | Ingress 
rules for liaison                                   | `[]`            |
+| `cluster.liaison.ingress.tls`                                 | TLS 
configuration for liaison ingress                       | `[]`            |
+| `cluster.liaison.livenessProbe.initialDelaySeconds`           | Initial 
delay for liaison liveness probe                    | `20`            |
+| `cluster.liaison.livenessProbe.periodSeconds`                 | Probe period 
for liaison liveness probe                     | `30`            |
+| `cluster.liaison.livenessProbe.timeoutSeconds`                | Timeout in 
seconds for liaison liveness probe               | `5`             |
+| `cluster.liaison.livenessProbe.successThreshold`              | Success 
threshold for liaison liveness probe                | `1`             |
+| `cluster.liaison.livenessProbe.failureThreshold`              | Failure 
threshold for liaison liveness probe                | `5`             |
+| `cluster.liaison.readinessProbe.initialDelaySeconds`          | Initial 
delay for liaison readiness probe                   | `20`            |
+| `cluster.liaison.readinessProbe.periodSeconds`                | Probe period 
for liaison readiness probe                    | `30`            |
+| `cluster.liaison.readinessProbe.timeoutSeconds`               | Timeout in 
seconds for liaison readiness probe              | `5`             |
+| `cluster.liaison.readinessProbe.successThreshold`             | Success 
threshold for liaison readiness probe               | `1`             |
+| `cluster.liaison.readinessProbe.failureThreshold`             | Failure 
threshold for liaison readiness probe               | `5`             |
+| `cluster.liaison.startupProbe.initialDelaySeconds`            | Initial 
delay for liaison startup probe                     | `0`             |
+| `cluster.liaison.startupProbe.periodSeconds`                  | Probe period 
for liaison startup probe                      | `10`            |
+| `cluster.liaison.startupProbe.timeoutSeconds`                 | Timeout in 
seconds for liaison startup probe                | `5`             |
+| `cluster.liaison.startupProbe.successThreshold`               | Success 
threshold for liaison startup probe                 | `1`             |
+| `cluster.liaison.startupProbe.failureThreshold`               | Failure 
threshold for liaison startup probe                 | `60`            |
 
 ### Configuration for data component
 
-| Name                                                           | Description 
                                          | Value                               
         |
-| -------------------------------------------------------------- | 
----------------------------------------------------- | 
-------------------------------------------- |
-| `cluster.data.nodeTemplate.replicas`                           | Number of 
data replicas by default                    | `2`                               
           |
-| `cluster.data.nodeTemplate.podAnnotations`                     | Pod 
annotations for data pods                         | `{}`                        
                 |
-| `cluster.data.nodeTemplate.securityContext`                    | Security 
context for data pods                        | `{}`                             
            |
-| `cluster.data.nodeTemplate.env`                                | Environment 
variables for data pods                   | `[]`                                
         |
-| `cluster.data.nodeTemplate.priorityClassName`                  | Priority 
class name for data pods                     | `""`                             
            |
-| `cluster.data.nodeTemplate.podDisruptionBudget.maxUnavailable` | Maximum 
unavailable data pods                         | `1`                             
             |
-| `cluster.data.nodeTemplate.tolerations`                        | Tolerations 
for data pods                             | `[]`                                
         |
-| `cluster.data.nodeTemplate.nodeSelector`                       | Node 
selector for data pods                           | `[]`                         
                |
-| `cluster.data.nodeTemplate.affinity`                           | Affinity 
rules for data pods                          | `{}`                             
            |
-| `cluster.data.nodeTemplate.podAffinityPreset`                  | Pod 
affinity preset for data pods                     | `""`                        
                 |
-| `cluster.data.nodeTemplate.podAntiAffinityPreset`              | Pod 
anti-affinity preset for data pods                | `soft`                      
                 |
-| `cluster.data.nodeTemplate.resources.requests`                 | Resource 
requests for data pods                       | `[]`                             
            |
-| `cluster.data.nodeTemplate.resources.limits`                   | Resource 
limits for data pods                         | `[]`                             
            |
-| `cluster.data.nodeTemplate.grpcSvc.labels`                     | Labels for 
GRPC service for data pods                 | `{}`                               
          |
-| `cluster.data.nodeTemplate.grpcSvc.annotations`                | Annotations 
for GRPC service for data pods            | `{}`                                
         |
-| `cluster.data.nodeTemplate.grpcSvc.port`                       | Port number 
for GRPC service for data pods            | `17912`                             
         |
-| `cluster.data.nodeTemplate.sidecar`                            | Sidecar 
containers for data pods                      | `[]`                            
             |
-| `cluster.data.nodeTemplate.backupSidecar.enabled`              | Enable 
backup sidecar for data pods (boolean)         | `false`                        
              |
-| `cluster.data.nodeTemplate.backupSidecar.dest`                 | Backup 
destination path for data pods                 | 
`file:///tmp/backups/data-$(ORDINAL_NUMBER)` |
-| `cluster.data.nodeTemplate.backupSidecar.timeStyle`            | Backup time 
style for data pods (e.g., daily)         | `daily`                             
         |
-| `cluster.data.nodeTemplate.backupSidecar.schedule`             | Backup 
schedule for data pods (cron format)           | `@hourly`                      
              |
-| `cluster.data.nodeTemplate.backupSidecar.resources`            | Resources 
for backup sidecar for data pods            | `{}`                              
           |
-| `cluster.data.nodeTemplate.lifecycleSidecar.enabled`           | Enable 
lifecycle sidecar for data pods (boolean)      | `false`                        
              |
-| `cluster.data.nodeTemplate.lifecycleSidecar.schedule`          | Schedule 
for lifecycle sidecar (cron format)          | `@hourly`                        
            |
-| `cluster.data.nodeTemplate.lifecycleSidecar.resources`         | Resources 
for lifecycle sidecar for data pods         | `{}`                              
           |
-| `cluster.data.nodeTemplate.restoreInitContainer.enabled`       | Enable 
restore init container for data pods (boolean) | `false`                        
              |
-| `cluster.data.nodeTemplate.restoreInitContainer.resources`     | Resources 
for restore init container for data pods    | `{}`                              
           |
-| `cluster.data.nodeTemplate.livenessProbe.initialDelaySeconds`  | Initial 
delay for data liveness probe                 | `20`                            
             |
-| `cluster.data.nodeTemplate.livenessProbe.periodSeconds`        | Probe 
period for data liveness probe                  | `30`                          
               |
-| `cluster.data.nodeTemplate.livenessProbe.timeoutSeconds`       | Timeout in 
seconds for data liveness probe            | `5`                                
          |
-| `cluster.data.nodeTemplate.livenessProbe.successThreshold`     | Success 
threshold for data liveness probe             | `1`                             
             |
-| `cluster.data.nodeTemplate.livenessProbe.failureThreshold`     | Failure 
threshold for data liveness probe             | `5`                             
             |
-| `cluster.data.nodeTemplate.readinessProbe.initialDelaySeconds` | Initial 
delay for data readiness probe                | `20`                            
             |
-| `cluster.data.nodeTemplate.readinessProbe.periodSeconds`       | Probe 
period for data readiness probe                 | `30`                          
               |
-| `cluster.data.nodeTemplate.readinessProbe.timeoutSeconds`      | Timeout in 
seconds for data readiness probe           | `5`                                
          |
-| `cluster.data.nodeTemplate.readinessProbe.successThreshold`    | Success 
threshold for data readiness probe            | `1`                             
             |
-| `cluster.data.nodeTemplate.readinessProbe.failureThreshold`    | Failure 
threshold for data readiness probe            | `5`                             
             |
-| `cluster.data.nodeTemplate.startupProbe.initialDelaySeconds`   | Initial 
delay for data startup probe                  | `0`                             
             |
-| `cluster.data.nodeTemplate.startupProbe.periodSeconds`         | Probe 
period for data startup probe                   | `10`                          
               |
-| `cluster.data.nodeTemplate.startupProbe.timeoutSeconds`        | Timeout in 
seconds for data startup probe             | `5`                                
          |
-| `cluster.data.nodeTemplate.startupProbe.successThreshold`      | Success 
threshold for data startup probe              | `1`                             
             |
-| `cluster.data.nodeTemplate.startupProbe.failureThreshold`      | Failure 
threshold for data startup probe              | `60`                            
             |
-| `cluster.data.roles`                                           | List of 
data roles (hot, warm, cold)                  |                                 
             |
-| `cluster.data.roles.hot`                                       | Hot data 
role                                         | `{}`                             
            |
+| Name                                                           | Description 
                                                                 | Value        
                                |
+| -------------------------------------------------------------- | 
---------------------------------------------------------------------------- | 
-------------------------------------------- |
+| `cluster.data.nodeTemplate.replicas`                           | Number of 
data replicas by default                                           | `2`        
                                  |
+| `cluster.data.nodeTemplate.podAnnotations`                     | Pod 
annotations for data pods                                                | `{}` 
                                        |
+| `cluster.data.nodeTemplate.securityContext`                    | Security 
context for data pods                                               | `{}`      
                                   |
+| `cluster.data.nodeTemplate.containerSecurityContext`           | 
Container-level security context for data pods                               | 
`{}`                                         |
+| `cluster.data.nodeTemplate.volumePermissions.enabled`          | Enable 
volume permissions init container for data pods                       | `false` 
                                     |
+| `cluster.data.nodeTemplate.volumePermissions.chownUser`        | User ID to 
chown the mounted volumes for data pods                           | `1000`      
                                 |
+| `cluster.data.nodeTemplate.volumePermissions.chownGroup`       | Group ID to 
chown the mounted volumes for data pods                          | `1000`       
                                |
+| `cluster.data.nodeTemplate.volumePermissions.image`            | Image for 
the volume permissions init container for data pods                | 
`busybox:1.36`                               |
+| `cluster.data.nodeTemplate.env`                                | Environment 
variables for data pods                                          | `[]`         
                                |
+| `cluster.data.nodeTemplate.priorityClassName`                  | Priority 
class name for data pods                                            | `""`      
                                   |
+| `cluster.data.nodeTemplate.podDisruptionBudget.maxUnavailable` | Maximum 
unavailable data pods                                                | `1`      
                                    |
+| `cluster.data.nodeTemplate.tolerations`                        | Tolerations 
for data pods                                                    | `[]`         
                                |
+| `cluster.data.nodeTemplate.nodeSelector`                       | Node 
selector for data pods                                                  | `[]`  
                                       |
+| `cluster.data.nodeTemplate.affinity`                           | Affinity 
rules for data pods                                                 | `{}`      
                                   |
+| `cluster.data.nodeTemplate.podAffinityPreset`                  | Pod 
affinity preset for data pods                                            | `""` 
                                        |
+| `cluster.data.nodeTemplate.podAntiAffinityPreset`              | Pod 
anti-affinity preset for data pods                                       | 
`soft`                                       |
+| `cluster.data.nodeTemplate.resources.requests`                 | Resource 
requests for data pods                                              | `[]`      
                                   |
+| `cluster.data.nodeTemplate.resources.limits`                   | Resource 
limits for data pods                                                | `[]`      
                                   |
+| `cluster.data.nodeTemplate.grpcSvc.labels`                     | Labels for 
GRPC service for data pods                                        | `{}`        
                                 |
+| `cluster.data.nodeTemplate.grpcSvc.annotations`                | Annotations 
for GRPC service for data pods                                   | `{}`         
                                |
+| `cluster.data.nodeTemplate.grpcSvc.port`                       | Port number 
for GRPC service for data pods                                   | `17912`      
                                |
+| `cluster.data.nodeTemplate.sidecar`                            | Sidecar 
containers for data pods                                             | `[]`     
                                    |
+| `cluster.data.nodeTemplate.backupSidecar.enabled`              | Enable 
backup sidecar for data pods (boolean)                                | `false` 
                                     |
+| `cluster.data.nodeTemplate.backupSidecar.dest`                 | Backup 
destination path for data pods                                        | 
`file:///tmp/backups/data-$(ORDINAL_NUMBER)` |
+| `cluster.data.nodeTemplate.backupSidecar.timeStyle`            | Backup time 
style for data pods (e.g., daily)                                | `daily`      
                                |
+| `cluster.data.nodeTemplate.backupSidecar.schedule`             | Backup 
schedule for data pods (cron format)                                  | 
`@hourly`                                    |
+| `cluster.data.nodeTemplate.backupSidecar.customFlags`          | Custom 
flags for backup sidecar (e.g., S3, Azure, GCS configuration)         | `[]`    
                                     |
+| `cluster.data.nodeTemplate.backupSidecar.resources`            | Resources 
for backup sidecar for data pods                                   | `{}`       
                                  |
+| `cluster.data.nodeTemplate.lifecycleSidecar.enabled`           | Enable 
lifecycle sidecar for data pods (boolean)                             | `false` 
                                     |
+| `cluster.data.nodeTemplate.lifecycleSidecar.schedule`          | Schedule 
for lifecycle sidecar (cron format)                                 | `@hourly` 
                                   |
+| `cluster.data.nodeTemplate.lifecycleSidecar.resources`         | Resources 
for lifecycle sidecar for data pods                                | `{}`       
                                  |
+| `cluster.data.nodeTemplate.restoreInitContainer.enabled`       | Enable 
restore init container for data pods (boolean)                        | `false` 
                                     |
+| `cluster.data.nodeTemplate.restoreInitContainer.customFlags`   | Custom 
flags for restore init container (e.g., S3, Azure, GCS configuration) | `[]`    
                                     |
+| `cluster.data.nodeTemplate.restoreInitContainer.resources`     | Resources 
for restore init container for data pods                           | `{}`       
                                  |
+| `cluster.data.nodeTemplate.livenessProbe.initialDelaySeconds`  | Initial 
delay for data liveness probe                                        | `20`     
                                    |
+| `cluster.data.nodeTemplate.livenessProbe.periodSeconds`        | Probe 
period for data liveness probe                                         | `30`   
                                      |
+| `cluster.data.nodeTemplate.livenessProbe.timeoutSeconds`       | Timeout in 
seconds for data liveness probe                                   | `5`         
                                 |
+| `cluster.data.nodeTemplate.livenessProbe.successThreshold`     | Success 
threshold for data liveness probe                                    | `1`      
                                    |
+| `cluster.data.nodeTemplate.livenessProbe.failureThreshold`     | Failure 
threshold for data liveness probe                                    | `5`      
                                    |
+| `cluster.data.nodeTemplate.readinessProbe.initialDelaySeconds` | Initial 
delay for data readiness probe                                       | `20`     
                                    |
+| `cluster.data.nodeTemplate.readinessProbe.periodSeconds`       | Probe 
period for data readiness probe                                        | `30`   
                                      |
+| `cluster.data.nodeTemplate.readinessProbe.timeoutSeconds`      | Timeout in 
seconds for data readiness probe                                  | `5`         
                                 |
+| `cluster.data.nodeTemplate.readinessProbe.successThreshold`    | Success 
threshold for data readiness probe                                   | `1`      
                                    |
+| `cluster.data.nodeTemplate.readinessProbe.failureThreshold`    | Failure 
threshold for data readiness probe                                   | `5`      
                                    |
+| `cluster.data.nodeTemplate.startupProbe.initialDelaySeconds`   | Initial 
delay for data startup probe                                         | `0`      
                                    |
+| `cluster.data.nodeTemplate.startupProbe.periodSeconds`         | Probe 
period for data startup probe                                          | `10`   
                                      |
+| `cluster.data.nodeTemplate.startupProbe.timeoutSeconds`        | Timeout in 
seconds for data startup probe                                    | `5`         
                                 |
+| `cluster.data.nodeTemplate.startupProbe.successThreshold`      | Success 
threshold for data startup probe                                     | `1`      
                                    |
+| `cluster.data.nodeTemplate.startupProbe.failureThreshold`      | Failure 
threshold for data startup probe                                     | `60`     
                                    |
+| `cluster.data.roles`                                           | Map of 
data roles keyed by role name (hot, warm, cold)                       |          
                                    |
+| `cluster.data.roles.hot`                                       | Hot data 
role                                                                | `{}`      
                                   |
 
 ### Configuration for UI component
 
@@ -296,16 +313,16 @@ The content of this document describes the parameters 
that can be configured in
 
 ### Client TLS configuration
 
-| Name                                    | Description                        
                            | Value      |
-| --------------------------------------- | 
-------------------------------------------------------------- | ---------- |
-| `etcd.auth.client.secureTransport`      | Enable TLS for client 
communication (boolean)                  | `false`    |
-| `etcd.auth.client.existingSecret`       | Existing secret containing TLS 
certs                           | `""`       |
-| `etcd.auth.client.enableAuthentication` | Enable client authentication 
(boolean)                         | `false`    |
-| `etcd.auth.client.certFilename`         | Name of the file containing the 
client certificate             | `cert.pem` |
-| `etcd.auth.client.certKeyFilename`      | Name of the file containing the 
client certificate private key | `key.pem`  |
-| `etcd.auth.client.caFilename`           | CA certificate filename for TLS    
                            | `""`       |
-| `etcd.auth.token.enabled`               | Enables token authentication       
                            | `true`     |
-| `etcd.auth.token.type`                  | Authentication token type. Allowed 
values: 'simple' or 'jwt'   | `jwt`      |
+| Name                                    | Description                        
                          | Value     |
+| --------------------------------------- | 
------------------------------------------------------------ | --------- |
+| `etcd.auth.client.secureTransport`      | Enable TLS for client 
communication (boolean)                | `false`   |
+| `etcd.auth.client.existingSecret`       | Existing secret containing TLS 
certs                         | `""`      |
+| `etcd.auth.client.enableAuthentication` | Enable client authentication 
(boolean)                       | `false`   |
+| `etcd.auth.client.certFilename`         | Client certificate filename        
                          | `tls.crt` |
+| `etcd.auth.client.certKeyFilename`      | Client certificate key filename    
                          | `tls.key` |
+| `etcd.auth.client.caFilename`           | CA certificate filename for TLS    
                          | `""`      |
+| `etcd.auth.token.enabled`               | Enables token authentication       
                          | `true`    |
+| `etcd.auth.token.type`                  | Authentication token type. Allowed 
values: 'simple' or 'jwt' | `simple`  |
 
 ### Liveness probe configuration for etcd
 
diff --git a/test/e2e/e2e-banyandb-cluster.yaml 
b/test/e2e/e2e-banyandb-cluster.yaml
index 9d126e1..d84b8ed 100644
--- a/test/e2e/e2e-banyandb-cluster.yaml
+++ b/test/e2e/e2e-banyandb-cluster.yaml
@@ -49,6 +49,11 @@ setup:
         helm dependency update
     - name: Install BanyanDB
       command: helm -n istio-system install banyandb chart/ -f 
test/e2e/values.cluster.yaml
+      wait:
+        - namespace: istio-system
+          resource: pod
+          for: condition=ready
+          label-selector: app.kubernetes.io/name=banyandb
     - name: Install SkyWalking
       command: |
         helm -n istio-system install --timeout 10m skywalking 
oci://ghcr.io/apache/skywalking-helm/skywalking-helm \
diff --git a/test/e2e/e2e-banyandb-standalone.yaml 
b/test/e2e/e2e-banyandb-lifecycle.yaml
similarity index 98%
copy from test/e2e/e2e-banyandb-standalone.yaml
copy to test/e2e/e2e-banyandb-lifecycle.yaml
index 986b243..ba39ce4 100644
--- a/test/e2e/e2e-banyandb-standalone.yaml
+++ b/test/e2e/e2e-banyandb-lifecycle.yaml
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# This file is used to show how to write configuration files and can be used 
to test.
+# This file is used to test BanyanDB with lifecycle features enabled.
 
 setup:
   env: kind
@@ -47,8 +47,13 @@ setup:
         helm repo add bitnami https://charts.bitnami.com/bitnami
         cd chart
         helm dependency update
-    - name: Install BanyanDB
-      command: helm -n istio-system install banyandb chart/ -f 
test/e2e/values.standalone.yaml
+    - name: Install BanyanDB with lifecycle features
+      command: helm -n istio-system install banyandb chart/ -f 
test/e2e/values.lifecycle.yaml
+      wait:
+        - namespace: istio-system
+          resource: pod
+          for: condition=ready
+          label-selector: app.kubernetes.io/name=banyandb
     - name: Install SkyWalking
       command: |
         helm -n istio-system install --timeout 10m skywalking 
oci://ghcr.io/apache/skywalking-helm/skywalking-helm \
diff --git a/test/e2e/e2e-banyandb-standalone.yaml 
b/test/e2e/e2e-banyandb-standalone.yaml
index 986b243..3cce8e5 100644
--- a/test/e2e/e2e-banyandb-standalone.yaml
+++ b/test/e2e/e2e-banyandb-standalone.yaml
@@ -49,6 +49,11 @@ setup:
         helm dependency update
     - name: Install BanyanDB
       command: helm -n istio-system install banyandb chart/ -f 
test/e2e/values.standalone.yaml
+      wait:
+        - namespace: istio-system
+          resource: pod
+          for: condition=ready
+          label-selector: app.kubernetes.io/name=banyandb
     - name: Install SkyWalking
       command: |
         helm -n istio-system install --timeout 10m skywalking 
oci://ghcr.io/apache/skywalking-helm/skywalking-helm \
diff --git a/test/e2e/values.cluster.yaml b/test/e2e/values.cluster.yaml
index da0c0b3..e4b73a2 100644
--- a/test/e2e/values.cluster.yaml
+++ b/test/e2e/values.cluster.yaml
@@ -26,10 +26,24 @@ cluster:
   enabled: true
   etcdEndpoints: []
   liaison:
-    replicas: 1
+    replicas: 2
     podAnnotations:
       example: banyandb-foo
-    securityContext: {}
+    securityContext:
+      runAsUser: 1000
+      runAsGroup: 1000
+      fsGroup: 1000
+    containerSecurityContext:
+      readOnlyRootFilesystem: true
+      allowPrivilegeEscalation: false
+      runAsNonRoot: true
+      capabilities:
+        drop: ["ALL"]
+    volumePermissions:
+      enabled: true
+      chownUser: 1000
+      chownGroup: 1000
+      image: busybox:1.36
     # runAsUser: 1000
     # runAsGroup: 1000
     # fsGroup: 1000
@@ -149,10 +163,24 @@ cluster:
 
   data:
     nodeTemplate:
-      replicas: 1
+      replicas: 2
       podAnnotations:
         example: banyandb-foo
-      securityContext: {}
+      securityContext:
+        runAsUser: 1000
+        runAsGroup: 1000
+        fsGroup: 1000
+      containerSecurityContext:
+        readOnlyRootFilesystem: true
+        allowPrivilegeEscalation: false
+        runAsNonRoot: true
+        capabilities:
+          drop: ["ALL"]
+      volumePermissions:
+        enabled: true
+        chownUser: 1000
+        chownGroup: 1000
+        image: busybox:1.36
       # runAsUser: 1000
       # runAsGroup: 1000
       # fsGroup: 1000
@@ -232,9 +260,11 @@ cluster:
         dest: "file:///tmp/backups/data-$(ORDINAL_NUMBER)"
         timeStyle: "daily"
         schedule: "@every 10s"
+        customFlags: []
         resources: {}
       restoreInitContainer:
         enabled: true
+        customFlags: []
         resources: {}
       livenessProbe:
         initialDelaySeconds: 20
diff --git a/test/e2e/values.cluster.yaml b/test/e2e/values.lifecycle.yaml
similarity index 53%
copy from test/e2e/values.cluster.yaml
copy to test/e2e/values.lifecycle.yaml
index da0c0b3..90cb4ca 100644
--- a/test/e2e/values.cluster.yaml
+++ b/test/e2e/values.lifecycle.yaml
@@ -13,8 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# Default values for banyandb.
-# This is a YAML-formatted file.
+# Lifecycle-enabled values for banyandb cluster mode.
+# This is a YAML-formatted file with lifecycle configuration enabled.
 # Declare variables to be passed into your templates.
 
 image:
@@ -26,16 +26,27 @@ cluster:
   enabled: true
   etcdEndpoints: []
   liaison:
-    replicas: 1
+    replicas: 2
     podAnnotations:
       example: banyandb-foo
-    securityContext: {}
-    # runAsUser: 1000
-    # runAsGroup: 1000
-    # fsGroup: 1000
-    env: []
-    # - name: BANYANDB_FOO
-    #   value: "bar"
+    securityContext:
+      runAsUser: 1000
+      runAsGroup: 1000
+      fsGroup: 1000
+    containerSecurityContext:
+      readOnlyRootFilesystem: true
+      allowPrivilegeEscalation: false
+      runAsNonRoot: true
+      capabilities:
+        drop: ["ALL"]
+    volumePermissions:
+      enabled: true
+      chownUser: 1000
+      chownGroup: 1000
+      image: busybox:1.36
+    env:
+      - name: BYDB_DATA_NODE_SELECTOR
+        value: "type=hot"
     priorityClassName: ""
     podDisruptionBudget: {}
     updateStrategy:
@@ -43,59 +54,14 @@ cluster:
       rollingUpdate:
         maxUnavailable: 1
     podManagementPolicy: Parallel
-    # minAvailable: 1
-    # maxUnavailable: 2
-    # matchLabels:
-    #   - key: foo
-    #     value: bar
-    # matchExpressions:
-    #   - key: foo
-    #     operator: In
-    #     values: [bar, baz]
-    # paused: false
     tolerations: []
-    # - key: foo
-    #   value: bar
-    #   operator: Equal
-    #   effect: NoSchedule
     nodeSelector: []
-    # - key: foo
-    #   value: bar
     affinity: {}
-    # nodeAffinity:
-    #   requiredDuringSchedulingIgnoredDuringExecution:
-    #     nodeSelectorTerms:
-    #       - matchExpressions:
-    #           - key: foo
-    #             operator: In
-    #             values:
-    #               - bar
-    #               - baz
-    # podAffinity:
-    #   requiredDuringSchedulingIgnoredDuringExecution:
-    #     - labelSelector:
-    #         matchLabels:
-    #           - key: app
-    #             value: banyandb
-    #       topologyKey: "kubernetes.io/hostname"
-    #       namespaces: []
     podAffinityPreset: ""
     podAntiAffinityPreset: soft
     resources:
       requests: []
-      # - key: cpu
-      #   value: "100m"
-      # - key: memory
-      #   value: "256Mi"
       limits: []
-      # - key: cpu
-      #   value: "500m"
-      # - key: memory
-      #   value: "512Mi"
-      # tls:
-      #   grpcSecretName: ""
-      #   httpSecretName: ""
-      #   etcdSecretName: ""
 
     grpcSvc:
       labels: {}
@@ -116,15 +82,7 @@ cluster:
       labels: {}
       annotations: {}
       rules: []
-      # - host: localhost
-      #   paths:
-      #     - path: /
-      #       port: 17913
-      #       serviceName: banyandb-http
       tls: []
-      # - hosts:
-      #     - localhost
-      #   secretName: tls-secret
 
     livenessProbe:
       initialDelaySeconds: 20
@@ -149,71 +107,36 @@ cluster:
 
   data:
     nodeTemplate:
-      replicas: 1
+      replicas: 2
       podAnnotations:
         example: banyandb-foo
-      securityContext: {}
-      # runAsUser: 1000
-      # runAsGroup: 1000
-      # fsGroup: 1000
+      securityContext:
+        runAsUser: 1000
+        runAsGroup: 1000
+        fsGroup: 1000
+      containerSecurityContext:
+        readOnlyRootFilesystem: true
+        allowPrivilegeEscalation: false
+        runAsNonRoot: true
+        capabilities:
+          drop: ["ALL"]
+      volumePermissions:
+        enabled: true
+        chownUser: 1000
+        chownGroup: 1000
+        image: busybox:1.36
       env: []
-      # - name: BANYANDB_FOO
-      #   value: "bar"
       priorityClassName: ""
       podDisruptionBudget:
         maxUnavailable: 1
-      # minAvailable: 1
-      # maxUnavailable: 2
-      # matchLabels:
-      #   - key: foo
-      #     value: bar
-      # matchExpressions:
-      #   - key: foo
-      #     operator: In
-      #     values: [bar, baz]
-      # paused: false
       tolerations: []
-      # - key: foo
-      #   value: bar
-      #   operator: Equal
-      #   effect: NoSchedule
       nodeSelector: []
-      # - key: foo
-      #   value: bar
       affinity: {}
-      # nodeAffinity:
-      #   requiredDuringSchedulingIgnoredDuringExecution:
-      #     nodeSelectorTerms:
-      #       - matchExpressions:
-      #           - key: foo
-      #             operator: In
-      #             values:
-      #               - bar
-      #               - baz
-      # podAffinity:
-      #   requiredDuringSchedulingIgnoredDuringExecution:
-      #     - labelSelector:
-      #         matchLabels:
-      #           - key: app
-      #             value: banyandb
-      #       topologyKey: "kubernetes.io/hostname"
-      #       namespaces: []
       podAffinityPreset: ""
       podAntiAffinityPreset: soft
       resources:
         requests: []
-        # - key: cpu
-        #   value: "100m"
-        # - key: memory
-        #   value: "256Mi"
         limits: []
-        # - key: cpu
-        #   value: "500m"
-        # - key: memory
-        #   value: "512Mi"
-        # tls:
-        #   grpcSecretName: ""
-        #   etcdSecretName: ""
 
       grpcSvc:
         labels: {}
@@ -221,21 +144,25 @@ cluster:
         port: 17912
 
       sidecar: []
-      # - name: cleanup-sidecar
-      #   image: busybox:latest
-      #   imagePullPolicy: IfNotPresent
-      #   commands: 
-      #     normal: ["sh", "-c", "while true; do echo 'sidecar task'; sleep 
60; done"]
-      #     preStop: ["sh", "-c", "echo cleanup"]
+
       backupSidecar:
         enabled: true
         dest: "file:///tmp/backups/data-$(ORDINAL_NUMBER)"
         timeStyle: "daily"
-        schedule: "@every 10s"
+        schedule: "@hourly"
+        customFlags: []
         resources: {}
+
+      lifecycleSidecar:
+        enabled: true
+        schedule: "@hourly"
+        resources: {}
+
       restoreInitContainer:
         enabled: true
+        customFlags: []
         resources: {}
+
       livenessProbe:
         initialDelaySeconds: 20
         periodSeconds: 5
@@ -256,25 +183,26 @@ cluster:
         timeoutSeconds: 5
         successThreshold: 1
         failureThreshold: 60
+
     roles:
-      hot: {}
+      hot:
+        lifecycleSidecar:
+          schedule: "@daily"
+          enabled: true
+      warm:
+        lifecycleSidecar:
+          schedule: "@daily"
+          enabled: true
+      cold:
+        replicas: 1
+
   ui:
-    # Available UI type: 
-    # None: Disable UI
-    # Standalone: Standalone UI deployment
-    # Embedded: Embedded UI in Liaison
     type: Embedded
     standalone:
       replicas: 1
-      podAnnotations: # example: banyandb-foo
-
+      podAnnotations: {}
       securityContext: {}
-      # runAsUser: 1000
-      # runAsGroup: 1000
-      # fsGroup: 1000
       env: []
-      # - name: BANYANDB_FOO
-      #   value: "bar"
       priorityClassName: ""
       updateStrategy:
         type: RollingUpdate
@@ -282,52 +210,14 @@ cluster:
           maxUnavailable: 1
           maxSurge: 1
       podDisruptionBudget: {}
-      #  minAvailable: 1
-      # maxUnavailable: 2
-      # paused: false
       tolerations: []
-      # - key: foo
-      #   value: bar
-      #   operator: Equal
-      #   effect: NoSchedule
       nodeSelector: []
-      # - key: foo
-      #   value: bar
       affinity: {}
-      # nodeAffinity:
-      #   requiredDuringSchedulingIgnoredDuringExecution:
-      #     nodeSelectorTerms:
-      #       - matchExpressions:
-      #           - key: foo
-      #             operator: In
-      #             values:
-      #               - bar
-      #               - baz
-      # podAffinity:
-      #   requiredDuringSchedulingIgnoredDuringExecution:
-      #     - labelSelector:
-      #         matchLabels:
-      #           - key: app
-      #             value: banyandb
-      #       topologyKey: "kubernetes.io/hostname"
-      #       namespaces: []
       podAffinityPreset: ""
       podAntiAffinityPreset: soft
       resources:
         requests: []
-        # - key: cpu
-        #   value: "100m"
-        # - key: memory
-        #   value: "256Mi"
         limits: []
-        # - key: cpu
-        #   value: "500m"
-        # - key: memory
-        #   value: "512Mi"
-        # tls:
-        # grpcSecretName: ""
-        # httpSecretName: ""
-        # etcdSecretName: ""
 
       httpSvc:
         labels: {}
@@ -343,15 +233,8 @@ cluster:
         labels: {}
         annotations: {}
         rules: []
-        # - host: localhost
-        #   paths:
-        #     - path: /
-        #       port: 17913
-        #       serviceName: banyandb-ui
         tls: []
-        # - hosts:
-        #     - localhost
-        #   secretName: tls-secret
+
       livenessProbe:
         initialDelaySeconds: 20
         periodSeconds: 30
@@ -377,24 +260,97 @@ storage:
   data:
     enabled: true
     persistentVolumeClaims:
-    - mountTargets: [ "measure", "stream", "property" ]
+    - mountTargets: [ "measure" ]
       nodeRole: hot
       existingClaimName: null
-      claimName: data
+      claimName: hot-measure-data
+      size: 50Gi
+      accessModes:
+      - ReadWriteOnce
+      storageClass: null
+      volumeMode: Filesystem
+    - mountTargets: [ "stream" ]
+      nodeRole: hot
+      existingClaimName: null
+      claimName: hot-stream-data
+      size: 50Gi
+      accessModes:
+      - ReadWriteOnce
+      storageClass: null
+      volumeMode: Filesystem
+    - mountTargets: [ "property" ]
+      nodeRole: hot
+      existingClaimName: null
+      claimName: hot-property-data
       size: 5Gi
       accessModes:
       - ReadWriteOnce
       storageClass: null
       volumeMode: Filesystem
+    - mountTargets: [ "measure" ]
+      nodeRole: warm
+      existingClaimName: null
+      claimName: warm-measure-data
+      size: 100Gi
+      accessModes:
+      - ReadWriteOnce
+      storageClass: null
+      volumeMode: Filesystem
+    - mountTargets: [ "stream" ]
+      nodeRole: warm
+      existingClaimName: null
+      claimName: warm-stream-data
+      size: 100Gi
+      accessModes:
+      - ReadWriteOnce
+      storageClass: null
+      volumeMode: Filesystem
+    - mountTargets: [ "property" ]
+      nodeRole: warm
+      existingClaimName: null
+      claimName: warm-property-data
+      size: 10Gi
+      accessModes:
+      - ReadWriteOnce
+      storageClass: null
+      volumeMode: Filesystem
+    - mountTargets: [ "measure" ]
+      nodeRole: cold
+      existingClaimName: null
+      claimName: cold-measure-data
+      size: 200Gi
+      accessModes:
+      - ReadWriteOnce
+      storageClass: null
+      volumeMode: Filesystem
+    - mountTargets: [ "stream" ]
+      nodeRole: cold
+      existingClaimName: null
+      claimName: cold-stream-data
+      size: 200Gi
+      accessModes:
+      - ReadWriteOnce
+      storageClass: null
+      volumeMode: Filesystem
+    - mountTargets: [ "property" ]
+      nodeRole: cold
+      existingClaimName: null
+      claimName: cold-property-data
+      size: 20Gi
+      accessModes:
+      - ReadWriteOnce
+      storageClass: null
+      volumeMode: Filesystem
     - mountTargets: [ "backups" ]
       nodeRole: hot
       existingClaimName: null
-      claimName: backups
-      size: 5Gi
+      claimName: hot-backups
+      size: 10Gi
       accessModes:
       - ReadWriteOnce
       storageClass: null
       volumeMode: Filesystem
+
   liaison:
     enabled: true
     persistentVolumeClaims:
@@ -405,6 +361,7 @@ storage:
       - ReadWriteOnce
       storageClass: null
       volumeMode: Filesystem
+
   standalone:
     enabled: false
     persistentVolumeClaims:
@@ -417,12 +374,8 @@ storage:
       volumeMode: Filesystem
 
 serviceAccount:
-  # Specifies whether a service account should be created
   create: true
-  # Annotations to add to the service account
   annotations: {}
-  # The name of the service account to use.
-  # If not set and create is true, a name is generated using the fullname 
template
   name: ""
 
 etcd:
@@ -440,12 +393,19 @@ etcd:
       certFilename: tls.crt
       certKeyFilename: tls.key
       caFilename: ""
-      # extraEnvVars:
-      #   - name: ETCDCTL_CACERT
-      #     value: /opt/bitnami/etcd/certs/client/ca.crt
     token:
       enabled: true
       type: simple
+  livenessProbe:
+    initialDelaySeconds: 10
+  readinessProbe:
+    initialDelaySeconds: 10
+  autoCompactionMode: periodic
+  autoCompactionRetention: "1"
+  defrag:
+    enabled: true
+    cronjob:
+      schedule: "0 0 * * *"
 
 fullnameOverride: ""
 nameOverride: "banyandb"
diff --git a/test/e2e/values.standalone.yaml b/test/e2e/values.standalone.yaml
index 9100187..7b702a0 100644
--- a/test/e2e/values.standalone.yaml
+++ b/test/e2e/values.standalone.yaml
@@ -33,10 +33,10 @@ standalone:
   name: banyandb
   podAnnotations:
     example: banyandb-foo
-  securityContext: {}
-  # runAsUser: 1000
-  # runAsGroup: 1000
-  # fsGroup: 1000
+  securityContext:
+    runAsUser: 1000
+    runAsGroup: 1000
+    fsGroup: 1000
   env: []
   # - name: BANYANDB_FOO
   #   value: "bar"
@@ -177,7 +177,7 @@ storage:
   standalone:
     enabled: false
     persistentVolumeClaims:
-    - mountTargets: [ "measure", "stream", "property" ]
+    - mountTargets: [ "measure", "stream", "metadata", "property" ]
       claimName: standalone-data
       size: 200Gi
       accessModes:


Reply via email to