surahman commented on pull request #3725:
URL: https://github.com/apache/incubator-heron/pull/3725#issuecomment-966758847


   @nicknezis: Setup and teardown of all PVCs, both with and without Pod
   Templates, as well as testing without PVCs, were successful. I have dumped
   the `YAML` below to make life easier.
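   
   For anyone wanting to reproduce these dumps, something along the lines of the
   following `kubectl` commands should work; the resource and namespace names are
   taken from the output below, and the exact invocations are illustrative rather
   than a record of what I ran:
   
   ```bash
   # Dump the topology's StatefulSet, one of its Pods, and a generated PVC as YAML.
   kubectl get statefulset acking --namespace default --output yaml
   kubectl get pod acking-1 --namespace default --output yaml
   kubectl get pvc volumenameofchoice-acking-0 --namespace default --output yaml
   
   # After killing the topology, confirm the dynamically created PVCs were cleaned up.
   kubectl get pvc --namespace default --selector topology=acking
   ```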
   
   **_STATIC:_**
   
   <details><summary>StatefulSet</summary>
   
   ```yaml
   apiVersion: apps/v1
   kind: StatefulSet
   metadata:
     creationTimestamp: "2021-11-12T01:56:24Z"
     generation: 1
     name: acking
     namespace: default
     resourceVersion: "1421"
     uid: 6e991292-575c-4cbb-b200-3e4a315cf4b6
   spec:
     podManagementPolicy: Parallel
     replicas: 3
     revisionHistoryLimit: 10
     selector:
       matchLabels:
         app: heron
         topology: acking
     serviceName: acking
     template:
       metadata:
         annotations:
           prometheus.io/port: "8080"
           prometheus.io/scrape: "true"
         creationTimestamp: null
         labels:
           app: heron
           topology: acking
       spec:
         containers:
         - command:
           - sh
           - -c
           - './heron-core/bin/heron-downloader-config kubernetes && ./heron-core/bin/heron-downloader
             distributedlog://zookeeper:2181/heronbkdl/acking-saad-tag-0--5861445565125076364.tar.gz
             . && SHARD_ID=${POD_NAME##*-} && echo shardId=${SHARD_ID} && ./heron-core/bin/heron-executor
             --topology-name=acking --topology-id=ackingac5a762c-c2d2-45fc-8b59-f83acfff7564
             --topology-defn-file=acking.defn --state-manager-connection=zookeeper:2181
             --state-manager-root=/heron --state-manager-config-file=./heron-conf/statemgr.yaml
             --tmanager-binary=./heron-core/bin/heron-tmanager --stmgr-binary=./heron-core/bin/heron-stmgr
             --metrics-manager-classpath=./heron-core/lib/metricsmgr/* --instance-jvm-opts="LVhYOitIZWFwRHVtcE9uT3V0T2ZNZW1vcnlFcnJvcg(61)(61)"
             --classpath=heron-api-examples.jar --heron-internals-config-file=./heron-conf/heron_internals.yaml
             --override-config-file=./heron-conf/override.yaml --component-ram-map=exclaim1:1073741824,word:1073741824
             --component-jvm-opts="" --pkg-type=jar --topology-binary-file=heron-api-examples.jar
             --heron-java-home=$JAVA_HOME --heron-shell-binary=./heron-core/bin/heron-shell
             --cluster=kubernetes --role=saad --environment=default --instance-classpath=./heron-core/lib/instance/*
             --metrics-sinks-config-file=./heron-conf/metrics_sinks.yaml --scheduler-classpath=./heron-core/lib/scheduler/*:./heron-core/lib/packing/*:./heron-core/lib/statemgr/*
             --python-instance-binary=./heron-core/bin/heron-python-instance --cpp-instance-binary=./heron-core/bin/heron-cpp-instance
             --metricscache-manager-classpath=./heron-core/lib/metricscachemgr/* --metricscache-manager-mode=disabled
             --is-stateful=false --checkpoint-manager-classpath=./heron-core/lib/ckptmgr/*:./heron-core/lib/statefulstorage/*:
             --stateful-config-file=./heron-conf/stateful.yaml --checkpoint-manager-ram=1073741824
             --health-manager-mode=disabled --health-manager-classpath=./heron-core/lib/healthmgr/*
             --shard=$SHARD_ID --server-port=6001 --tmanager-controller-port=6002 --tmanager-stats-port=6003
             --shell-port=6004 --metrics-manager-port=6005 --scheduler-port=6006 --metricscache-manager-server-port=6007
             --metricscache-manager-stats-port=6008 --checkpoint-manager-port=6009'
           env:
           - name: HOST
             valueFrom:
               fieldRef:
                 apiVersion: v1
                 fieldPath: status.podIP
           - name: POD_NAME
             valueFrom:
               fieldRef:
                 apiVersion: v1
                 fieldPath: metadata.name
           - name: var_one
             value: variable one
           - name: var_three
             value: variable three
           - name: var_two
             value: variable two
           image: apache/heron:testbuild
           imagePullPolicy: IfNotPresent
           name: executor
           ports:
           - containerPort: 5555
             name: tcp-port-kept
             protocol: TCP
           - containerPort: 5556
             name: udp-port-kept
             protocol: UDP
           - containerPort: 6001
             name: server
             protocol: TCP
           - containerPort: 6002
             name: tmanager-ctl
             protocol: TCP
           - containerPort: 6003
             name: tmanager-stats
             protocol: TCP
           - containerPort: 6004
             name: shell-port
             protocol: TCP
           - containerPort: 6005
             name: metrics-mgr
             protocol: TCP
           - containerPort: 6006
             name: scheduler
             protocol: TCP
           - containerPort: 6007
             name: metrics-cache-m
             protocol: TCP
           - containerPort: 6008
             name: metrics-cache-s
             protocol: TCP
           - containerPort: 6009
             name: ckptmgr
             protocol: TCP
           resources:
             limits:
               cpu: "3"
               memory: 4Gi
             requests:
               cpu: "3"
               memory: 4Gi
           securityContext:
             allowPrivilegeEscalation: false
           terminationMessagePath: /dev/termination-log
           terminationMessagePolicy: File
           volumeMounts:
           - mountPath: /shared_volume
             name: shared-volume
           - mountPath: path/to/mount
             name: volumenameofchoice
             subPath: sub/path/to/mount
         - image: alpine
           imagePullPolicy: Always
           name: sidecar-container
           resources: {}
           terminationMessagePath: /dev/termination-log
           terminationMessagePolicy: File
           volumeMounts:
           - mountPath: /shared_volume
             name: shared-volume
         dnsPolicy: ClusterFirst
         restartPolicy: Always
         schedulerName: default-scheduler
         securityContext: {}
         terminationGracePeriodSeconds: 0
         tolerations:
         - effect: NoExecute
           key: node.kubernetes.io/not-ready
           operator: Exists
           tolerationSeconds: 10
         - effect: NoExecute
           key: node.kubernetes.io/unreachable
           operator: Exists
           tolerationSeconds: 10
         volumes:
         - emptyDir: {}
           name: shared-volume
     updateStrategy:
       rollingUpdate:
         partition: 0
       type: RollingUpdate
     volumeClaimTemplates:
     - apiVersion: v1
       kind: PersistentVolumeClaim
       metadata:
         creationTimestamp: null
         labels:
           onDemand: "true"
           topology: acking
         name: volumenameofchoice
       spec:
         accessModes:
         - ReadWriteOnce
         - ReadOnlyMany
         resources:
           requests:
             storage: 555Gi
         volumeMode: Block
       status:
         phase: Pending
   status:
     collisionCount: 0
     currentReplicas: 3
     currentRevision: acking-8688f48fb8
     observedGeneration: 1
     replicas: 3
     updateRevision: acking-8688f48fb8
     updatedReplicas: 3
   ```
   
   </details>
   
   <details><summary>Pod</summary>
   
   ```yaml
   apiVersion: v1
   kind: Pod
   metadata:
     annotations:
       prometheus.io/port: "8080"
       prometheus.io/scrape: "true"
     creationTimestamp: "2021-11-12T01:50:55Z"
     generateName: acking-
     labels:
       app: heron
       controller-revision-hash: acking-5f56945486
       statefulset.kubernetes.io/pod-name: acking-1
       topology: acking
     name: acking-1
     namespace: default
     ownerReferences:
     - apiVersion: apps/v1
       blockOwnerDeletion: true
       controller: true
       kind: StatefulSet
       name: acking
       uid: 22b719a9-9b7d-4d63-8365-e76b44a50abe
     resourceVersion: "960"
     uid: 35118297-0949-4648-b12f-8b755e583436
   spec:
     containers:
     - command:
       - sh
       - -c
       - './heron-core/bin/heron-downloader-config kubernetes && ./heron-core/bin/heron-downloader
         distributedlog://zookeeper:2181/heronbkdl/acking-saad-tag-0--1817385379878734459.tar.gz
         . && SHARD_ID=${POD_NAME##*-} && echo shardId=${SHARD_ID} && ./heron-core/bin/heron-executor
         --topology-name=acking --topology-id=ackingff8ff63d-977f-49ba-8521-11375f9c41d6
         --topology-defn-file=acking.defn --state-manager-connection=zookeeper:2181 --state-manager-root=/heron
         --state-manager-config-file=./heron-conf/statemgr.yaml --tmanager-binary=./heron-core/bin/heron-tmanager
         --stmgr-binary=./heron-core/bin/heron-stmgr --metrics-manager-classpath=./heron-core/lib/metricsmgr/*
         --instance-jvm-opts="LVhYOitIZWFwRHVtcE9uT3V0T2ZNZW1vcnlFcnJvcg(61)(61)" --classpath=heron-api-examples.jar
         --heron-internals-config-file=./heron-conf/heron_internals.yaml --override-config-file=./heron-conf/override.yaml
         --component-ram-map=exclaim1:1073741824,word:1073741824 --component-jvm-opts=""
         --pkg-type=jar --topology-binary-file=heron-api-examples.jar --heron-java-home=$JAVA_HOME
         --heron-shell-binary=./heron-core/bin/heron-shell --cluster=kubernetes --role=saad
         --environment=default --instance-classpath=./heron-core/lib/instance/* --metrics-sinks-config-file=./heron-conf/metrics_sinks.yaml
         --scheduler-classpath=./heron-core/lib/scheduler/*:./heron-core/lib/packing/*:./heron-core/lib/statemgr/*
         --python-instance-binary=./heron-core/bin/heron-python-instance --cpp-instance-binary=./heron-core/bin/heron-cpp-instance
         --metricscache-manager-classpath=./heron-core/lib/metricscachemgr/* --metricscache-manager-mode=disabled
         --is-stateful=false --checkpoint-manager-classpath=./heron-core/lib/ckptmgr/*:./heron-core/lib/statefulstorage/*:
         --stateful-config-file=./heron-conf/stateful.yaml --checkpoint-manager-ram=1073741824
         --health-manager-mode=disabled --health-manager-classpath=./heron-core/lib/healthmgr/*
         --shard=$SHARD_ID --server-port=6001 --tmanager-controller-port=6002 --tmanager-stats-port=6003
         --shell-port=6004 --metrics-manager-port=6005 --scheduler-port=6006 --metricscache-manager-server-port=6007
         --metricscache-manager-stats-port=6008 --checkpoint-manager-port=6009'
       env:
       - name: HOST
         valueFrom:
           fieldRef:
             apiVersion: v1
             fieldPath: status.podIP
       - name: POD_NAME
         valueFrom:
           fieldRef:
             apiVersion: v1
             fieldPath: metadata.name
       - name: var_one
         value: variable one
       - name: var_three
         value: variable three
       - name: var_two
         value: variable two
       image: apache/heron:testbuild
       imagePullPolicy: IfNotPresent
       name: executor
       ports:
       - containerPort: 5555
         name: tcp-port-kept
         protocol: TCP
       - containerPort: 5556
         name: udp-port-kept
         protocol: UDP
       - containerPort: 6001
         name: server
         protocol: TCP
       - containerPort: 6002
         name: tmanager-ctl
         protocol: TCP
       - containerPort: 6003
         name: tmanager-stats
         protocol: TCP
       - containerPort: 6004
         name: shell-port
         protocol: TCP
       - containerPort: 6005
         name: metrics-mgr
         protocol: TCP
       - containerPort: 6006
         name: scheduler
         protocol: TCP
       - containerPort: 6007
         name: metrics-cache-m
         protocol: TCP
       - containerPort: 6008
         name: metrics-cache-s
         protocol: TCP
       - containerPort: 6009
         name: ckptmgr
         protocol: TCP
       resources:
         limits:
           cpu: "3"
           memory: 4Gi
         requests:
           cpu: "3"
           memory: 4Gi
       securityContext:
         allowPrivilegeEscalation: false
       terminationMessagePath: /dev/termination-log
       terminationMessagePolicy: File
       volumeMounts:
       - mountPath: /shared_volume
         name: shared-volume
       - mountPath: path/to/mount
         name: volumenameofchoice
         subPath: sub/path/to/mount
       - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
         name: kube-api-access-b8b88
         readOnly: true
     - image: alpine
       imagePullPolicy: Always
       name: sidecar-container
       resources: {}
       terminationMessagePath: /dev/termination-log
       terminationMessagePolicy: File
       volumeMounts:
       - mountPath: /shared_volume
         name: shared-volume
       - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
         name: kube-api-access-b8b88
         readOnly: true
     dnsPolicy: ClusterFirst
     enableServiceLinks: true
     hostname: acking-1
     preemptionPolicy: PreemptLowerPriority
     priority: 0
     restartPolicy: Always
     schedulerName: default-scheduler
     securityContext: {}
     serviceAccount: default
     serviceAccountName: default
     subdomain: acking
     terminationGracePeriodSeconds: 0
     tolerations:
     - effect: NoExecute
       key: node.kubernetes.io/not-ready
       operator: Exists
       tolerationSeconds: 10
     - effect: NoExecute
       key: node.kubernetes.io/unreachable
       operator: Exists
       tolerationSeconds: 10
     volumes:
     - name: volumenameofchoice
       persistentVolumeClaim:
         claimName: volumenameofchoice-acking-1
     - emptyDir: {}
       name: shared-volume
     - name: kube-api-access-b8b88
       projected:
         defaultMode: 420
         sources:
         - serviceAccountToken:
             expirationSeconds: 3607
             path: token
         - configMap:
             items:
             - key: ca.crt
               path: ca.crt
             name: kube-root-ca.crt
         - downwardAPI:
             items:
             - fieldRef:
                 apiVersion: v1
                 fieldPath: metadata.namespace
               path: namespace
   status:
     conditions:
     - lastProbeTime: null
       lastTransitionTime: "2021-11-12T01:50:55Z"
        message: '0/1 nodes are available: 1 pod has unbound immediate PersistentVolumeClaims.'
       reason: Unschedulable
       status: "False"
       type: PodScheduled
     phase: Pending
     qosClass: Burstable
   ```
   
   </details>
   
   <details><summary>PVC</summary>
   
   ```yaml
   apiVersion: v1
   kind: PersistentVolumeClaim
   metadata:
     annotations:
       volume.beta.kubernetes.io/storage-provisioner: k8s.io/minikube-hostpath
     creationTimestamp: "2021-11-12T01:50:55Z"
     finalizers:
     - kubernetes.io/pvc-protection
     labels:
       app: heron
       onDemand: "true"
       topology: acking
     name: volumenameofchoice-acking-0
     namespace: default
     resourceVersion: "954"
     uid: 0e8c8aec-ce54-410e-8790-5da2238af798
   spec:
     accessModes:
     - ReadWriteOnce
     - ReadOnlyMany
     resources:
       requests:
         storage: 555Gi
     storageClassName: standard
     volumeMode: Block
   status:
     phase: Pending
   ```
   
   </details>
   
   **_DYNAMIC:_**
   
   <details><summary>StatefulSet</summary>
   
   ```yaml
   apiVersion: apps/v1
   kind: StatefulSet
   metadata:
     creationTimestamp: "2021-11-12T01:53:56Z"
     generation: 1
     name: acking
     namespace: default
     resourceVersion: "1212"
     uid: 597877ea-4601-4bf0-80e3-72f9db78ad47
   spec:
     podManagementPolicy: Parallel
     replicas: 3
     revisionHistoryLimit: 10
     selector:
       matchLabels:
         app: heron
         topology: acking
     serviceName: acking
     template:
       metadata:
         annotations:
           prometheus.io/port: "8080"
           prometheus.io/scrape: "true"
         creationTimestamp: null
         labels:
           app: heron
           topology: acking
       spec:
         containers:
         - command:
           - sh
           - -c
           - './heron-core/bin/heron-downloader-config kubernetes && ./heron-core/bin/heron-downloader
             distributedlog://zookeeper:2181/heronbkdl/acking-saad-tag-0--6172080214638514375.tar.gz
             . && SHARD_ID=${POD_NAME##*-} && echo shardId=${SHARD_ID} && ./heron-core/bin/heron-executor
             --topology-name=acking --topology-id=acking6a21ddc3-dbab-4c25-8a54-dcd3b0264b55
             --topology-defn-file=acking.defn --state-manager-connection=zookeeper:2181
             --state-manager-root=/heron --state-manager-config-file=./heron-conf/statemgr.yaml
             --tmanager-binary=./heron-core/bin/heron-tmanager --stmgr-binary=./heron-core/bin/heron-stmgr
             --metrics-manager-classpath=./heron-core/lib/metricsmgr/* --instance-jvm-opts="LVhYOitIZWFwRHVtcE9uT3V0T2ZNZW1vcnlFcnJvcg(61)(61)"
             --classpath=heron-api-examples.jar --heron-internals-config-file=./heron-conf/heron_internals.yaml
             --override-config-file=./heron-conf/override.yaml --component-ram-map=exclaim1:1073741824,word:1073741824
             --component-jvm-opts="" --pkg-type=jar --topology-binary-file=heron-api-examples.jar
             --heron-java-home=$JAVA_HOME --heron-shell-binary=./heron-core/bin/heron-shell
             --cluster=kubernetes --role=saad --environment=default --instance-classpath=./heron-core/lib/instance/*
             --metrics-sinks-config-file=./heron-conf/metrics_sinks.yaml --scheduler-classpath=./heron-core/lib/scheduler/*:./heron-core/lib/packing/*:./heron-core/lib/statemgr/*
             --python-instance-binary=./heron-core/bin/heron-python-instance --cpp-instance-binary=./heron-core/bin/heron-cpp-instance
             --metricscache-manager-classpath=./heron-core/lib/metricscachemgr/* --metricscache-manager-mode=disabled
             --is-stateful=false --checkpoint-manager-classpath=./heron-core/lib/ckptmgr/*:./heron-core/lib/statefulstorage/*:
             --stateful-config-file=./heron-conf/stateful.yaml --checkpoint-manager-ram=1073741824
             --health-manager-mode=disabled --health-manager-classpath=./heron-core/lib/healthmgr/*
             --shard=$SHARD_ID --server-port=6001 --tmanager-controller-port=6002 --tmanager-stats-port=6003
             --shell-port=6004 --metrics-manager-port=6005 --scheduler-port=6006 --metricscache-manager-server-port=6007
             --metricscache-manager-stats-port=6008 --checkpoint-manager-port=6009'
           env:
           - name: HOST
             valueFrom:
               fieldRef:
                 apiVersion: v1
                 fieldPath: status.podIP
           - name: POD_NAME
             valueFrom:
               fieldRef:
                 apiVersion: v1
                 fieldPath: metadata.name
           - name: var_one
             value: variable one
           - name: var_three
             value: variable three
           - name: var_two
             value: variable two
           image: apache/heron:testbuild
           imagePullPolicy: IfNotPresent
           name: executor
           ports:
           - containerPort: 5555
             name: tcp-port-kept
             protocol: TCP
           - containerPort: 5556
             name: udp-port-kept
             protocol: UDP
           - containerPort: 6001
             name: server
             protocol: TCP
           - containerPort: 6002
             name: tmanager-ctl
             protocol: TCP
           - containerPort: 6003
             name: tmanager-stats
             protocol: TCP
           - containerPort: 6004
             name: shell-port
             protocol: TCP
           - containerPort: 6005
             name: metrics-mgr
             protocol: TCP
           - containerPort: 6006
             name: scheduler
             protocol: TCP
           - containerPort: 6007
             name: metrics-cache-m
             protocol: TCP
           - containerPort: 6008
             name: metrics-cache-s
             protocol: TCP
           - containerPort: 6009
             name: ckptmgr
             protocol: TCP
           resources:
             limits:
               cpu: "3"
               memory: 4Gi
             requests:
               cpu: "3"
               memory: 4Gi
           securityContext:
             allowPrivilegeEscalation: false
           terminationMessagePath: /dev/termination-log
           terminationMessagePolicy: File
           volumeMounts:
           - mountPath: /shared_volume
             name: shared-volume
           - mountPath: path/to/mount
             name: volumenameofchoice
             subPath: sub/path/to/mount
         - image: alpine
           imagePullPolicy: Always
           name: sidecar-container
           resources: {}
           terminationMessagePath: /dev/termination-log
           terminationMessagePolicy: File
           volumeMounts:
           - mountPath: /shared_volume
             name: shared-volume
         dnsPolicy: ClusterFirst
         restartPolicy: Always
         schedulerName: default-scheduler
         securityContext: {}
         terminationGracePeriodSeconds: 0
         tolerations:
         - effect: NoExecute
           key: node.kubernetes.io/not-ready
           operator: Exists
           tolerationSeconds: 10
         - effect: NoExecute
           key: node.kubernetes.io/unreachable
           operator: Exists
           tolerationSeconds: 10
         volumes:
         - emptyDir: {}
           name: shared-volume
     updateStrategy:
       rollingUpdate:
         partition: 0
       type: RollingUpdate
     volumeClaimTemplates:
     - apiVersion: v1
       kind: PersistentVolumeClaim
       metadata:
         creationTimestamp: null
         labels:
           onDemand: "true"
           topology: acking
         name: volumenameofchoice
       spec:
         accessModes:
         - ReadWriteOnce
         - ReadOnlyMany
         resources:
           requests:
             storage: 555Gi
         storageClassName: storage-class-name
         volumeMode: Block
       status:
         phase: Pending
   status:
     collisionCount: 0
     currentReplicas: 3
     currentRevision: acking-5cbfb4df4c
     observedGeneration: 1
     replicas: 3
     updateRevision: acking-5cbfb4df4c
     updatedReplicas: 3
   ```
   
   </details>
   
   <details><summary>Pod</summary>
   
   ```yaml
   apiVersion: v1
   kind: Pod
   metadata:
     annotations:
       prometheus.io/port: "8080"
       prometheus.io/scrape: "true"
     creationTimestamp: "2021-11-12T01:53:56Z"
     generateName: acking-
     labels:
       app: heron
       controller-revision-hash: acking-5cbfb4df4c
       statefulset.kubernetes.io/pod-name: acking-1
       topology: acking
     name: acking-1
     namespace: default
     ownerReferences:
     - apiVersion: apps/v1
       blockOwnerDeletion: true
       controller: true
       kind: StatefulSet
       name: acking
       uid: 597877ea-4601-4bf0-80e3-72f9db78ad47
     resourceVersion: "1205"
     uid: eec86082-2259-4fe7-999a-10f7e58d2e79
   spec:
     containers:
     - command:
       - sh
       - -c
       - './heron-core/bin/heron-downloader-config kubernetes && ./heron-core/bin/heron-downloader
         distributedlog://zookeeper:2181/heronbkdl/acking-saad-tag-0--6172080214638514375.tar.gz
         . && SHARD_ID=${POD_NAME##*-} && echo shardId=${SHARD_ID} && ./heron-core/bin/heron-executor
         --topology-name=acking --topology-id=acking6a21ddc3-dbab-4c25-8a54-dcd3b0264b55
         --topology-defn-file=acking.defn --state-manager-connection=zookeeper:2181 --state-manager-root=/heron
         --state-manager-config-file=./heron-conf/statemgr.yaml --tmanager-binary=./heron-core/bin/heron-tmanager
         --stmgr-binary=./heron-core/bin/heron-stmgr --metrics-manager-classpath=./heron-core/lib/metricsmgr/*
         --instance-jvm-opts="LVhYOitIZWFwRHVtcE9uT3V0T2ZNZW1vcnlFcnJvcg(61)(61)" --classpath=heron-api-examples.jar
         --heron-internals-config-file=./heron-conf/heron_internals.yaml --override-config-file=./heron-conf/override.yaml
         --component-ram-map=exclaim1:1073741824,word:1073741824 --component-jvm-opts=""
         --pkg-type=jar --topology-binary-file=heron-api-examples.jar --heron-java-home=$JAVA_HOME
         --heron-shell-binary=./heron-core/bin/heron-shell --cluster=kubernetes --role=saad
         --environment=default --instance-classpath=./heron-core/lib/instance/* --metrics-sinks-config-file=./heron-conf/metrics_sinks.yaml
         --scheduler-classpath=./heron-core/lib/scheduler/*:./heron-core/lib/packing/*:./heron-core/lib/statemgr/*
         --python-instance-binary=./heron-core/bin/heron-python-instance --cpp-instance-binary=./heron-core/bin/heron-cpp-instance
         --metricscache-manager-classpath=./heron-core/lib/metricscachemgr/* --metricscache-manager-mode=disabled
         --is-stateful=false --checkpoint-manager-classpath=./heron-core/lib/ckptmgr/*:./heron-core/lib/statefulstorage/*:
         --stateful-config-file=./heron-conf/stateful.yaml --checkpoint-manager-ram=1073741824
         --health-manager-mode=disabled --health-manager-classpath=./heron-core/lib/healthmgr/*
         --shard=$SHARD_ID --server-port=6001 --tmanager-controller-port=6002 --tmanager-stats-port=6003
         --shell-port=6004 --metrics-manager-port=6005 --scheduler-port=6006 --metricscache-manager-server-port=6007
         --metricscache-manager-stats-port=6008 --checkpoint-manager-port=6009'
       env:
       - name: HOST
         valueFrom:
           fieldRef:
             apiVersion: v1
             fieldPath: status.podIP
       - name: POD_NAME
         valueFrom:
           fieldRef:
             apiVersion: v1
             fieldPath: metadata.name
       - name: var_one
         value: variable one
       - name: var_three
         value: variable three
       - name: var_two
         value: variable two
       image: apache/heron:testbuild
       imagePullPolicy: IfNotPresent
       name: executor
       ports:
       - containerPort: 5555
         name: tcp-port-kept
         protocol: TCP
       - containerPort: 5556
         name: udp-port-kept
         protocol: UDP
       - containerPort: 6001
         name: server
         protocol: TCP
       - containerPort: 6002
         name: tmanager-ctl
         protocol: TCP
       - containerPort: 6003
         name: tmanager-stats
         protocol: TCP
       - containerPort: 6004
         name: shell-port
         protocol: TCP
       - containerPort: 6005
         name: metrics-mgr
         protocol: TCP
       - containerPort: 6006
         name: scheduler
         protocol: TCP
       - containerPort: 6007
         name: metrics-cache-m
         protocol: TCP
       - containerPort: 6008
         name: metrics-cache-s
         protocol: TCP
       - containerPort: 6009
         name: ckptmgr
         protocol: TCP
       resources:
         limits:
           cpu: "3"
           memory: 4Gi
         requests:
           cpu: "3"
           memory: 4Gi
       securityContext:
         allowPrivilegeEscalation: false
       terminationMessagePath: /dev/termination-log
       terminationMessagePolicy: File
       volumeMounts:
       - mountPath: /shared_volume
         name: shared-volume
       - mountPath: path/to/mount
         name: volumenameofchoice
         subPath: sub/path/to/mount
       - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
         name: kube-api-access-bdwcl
         readOnly: true
     - image: alpine
       imagePullPolicy: Always
       name: sidecar-container
       resources: {}
       terminationMessagePath: /dev/termination-log
       terminationMessagePolicy: File
       volumeMounts:
       - mountPath: /shared_volume
         name: shared-volume
       - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
         name: kube-api-access-bdwcl
         readOnly: true
     dnsPolicy: ClusterFirst
     enableServiceLinks: true
     hostname: acking-1
     preemptionPolicy: PreemptLowerPriority
     priority: 0
     restartPolicy: Always
     schedulerName: default-scheduler
     securityContext: {}
     serviceAccount: default
     serviceAccountName: default
     subdomain: acking
     terminationGracePeriodSeconds: 0
     tolerations:
     - effect: NoExecute
       key: node.kubernetes.io/not-ready
       operator: Exists
       tolerationSeconds: 10
     - effect: NoExecute
       key: node.kubernetes.io/unreachable
       operator: Exists
       tolerationSeconds: 10
     volumes:
     - name: volumenameofchoice
       persistentVolumeClaim:
         claimName: volumenameofchoice-acking-1
     - emptyDir: {}
       name: shared-volume
     - name: kube-api-access-bdwcl
       projected:
         defaultMode: 420
         sources:
         - serviceAccountToken:
             expirationSeconds: 3607
             path: token
         - configMap:
             items:
             - key: ca.crt
               path: ca.crt
             name: kube-root-ca.crt
         - downwardAPI:
             items:
             - fieldRef:
                 apiVersion: v1
                 fieldPath: metadata.namespace
               path: namespace
   status:
     conditions:
     - lastProbeTime: null
       lastTransitionTime: "2021-11-12T01:53:56Z"
        message: '0/1 nodes are available: 1 pod has unbound immediate PersistentVolumeClaims.'
       reason: Unschedulable
       status: "False"
       type: PodScheduled
     phase: Pending
     qosClass: Burstable
   ```
   
   </details>
   
   <details><summary>PVC</summary>
   
   ```yaml
   apiVersion: v1
   kind: PersistentVolumeClaim
   metadata:
     creationTimestamp: "2021-11-12T01:53:56Z"
     finalizers:
     - kubernetes.io/pvc-protection
     labels:
       app: heron
       onDemand: "true"
       topology: acking
     name: volumenameofchoice-acking-0
     namespace: default
     resourceVersion: "1190"
     uid: b7a4c403-b749-452b-ac70-16c6a9e158d6
   spec:
     accessModes:
     - ReadWriteOnce
     - ReadOnlyMany
     resources:
       requests:
         storage: 555Gi
     storageClassName: storage-class-name
     volumeMode: Block
   status:
     phase: Pending
   ```
   
   </details>

