surahman commented on pull request #3725:
URL: https://github.com/apache/incubator-heron/pull/3725#issuecomment-965813459


   I can confirm the ability to launch and kill a topology with/without a Pod 
Template, as well as the ability to launch a topology with either a statically 
or a dynamically backed PVC. The dynamically backed PVC is deleted on kill. I am 
testing in Minikube on a clean environment (`delete --all`) with the latest PR 
build.
   
   <details><summary>Without dynamic PVC: kubectl get pod acking-1 -o 
yaml</summary>
   
   ```yaml
   apiVersion: v1
   kind: Pod
   metadata:
     annotations:
       prometheus.io/port: "8080"
       prometheus.io/scrape: "true"
     creationTimestamp: "2021-11-10T22:36:19Z"
     generateName: acking-
     labels:
       app: heron
       controller-revision-hash: acking-7cf596b959
       statefulset.kubernetes.io/pod-name: acking-1
       topology: acking
     name: acking-1
     namespace: default
     ownerReferences:
     - apiVersion: apps/v1
       blockOwnerDeletion: true
       controller: true
       kind: StatefulSet
       name: acking
       uid: 7638b318-645b-41e1-b0c9-47e124207a13
     resourceVersion: "852"
     uid: 1e125904-6c32-4c69-a48a-ba85a4136d9b
   spec:
     containers:
     - command:
       - sh
       - -c
       - './heron-core/bin/heron-downloader-config kubernetes && 
./heron-core/bin/heron-downloader
         
distributedlog://zookeeper:2181/heronbkdl/acking-saad-tag-0--358503390991778356.tar.gz
         . && SHARD_ID=${POD_NAME##*-} && echo shardId=${SHARD_ID} && 
./heron-core/bin/heron-executor
         --topology-name=acking 
--topology-id=acking8c323447-bce6-4a35-9b2f-7b6e1b9cc46d
         --topology-defn-file=acking.defn 
--state-manager-connection=zookeeper:2181 --state-manager-root=/heron
         --state-manager-config-file=./heron-conf/statemgr.yaml 
--tmanager-binary=./heron-core/bin/heron-tmanager
         --stmgr-binary=./heron-core/bin/heron-stmgr 
--metrics-manager-classpath=./heron-core/lib/metricsmgr/*
         
--instance-jvm-opts="LVhYOitIZWFwRHVtcE9uT3V0T2ZNZW1vcnlFcnJvcg(61)(61)" 
--classpath=heron-api-examples.jar
         --heron-internals-config-file=./heron-conf/heron_internals.yaml 
--override-config-file=./heron-conf/override.yaml
         --component-ram-map=exclaim1:1073741824,word:1073741824 
--component-jvm-opts=""
         --pkg-type=jar --topology-binary-file=heron-api-examples.jar 
--heron-java-home=$JAVA_HOME
         --heron-shell-binary=./heron-core/bin/heron-shell --cluster=kubernetes 
--role=saad
         --environment=default --instance-classpath=./heron-core/lib/instance/* 
--metrics-sinks-config-file=./heron-conf/metrics_sinks.yaml
         
--scheduler-classpath=./heron-core/lib/scheduler/*:./heron-core/lib/packing/*:./heron-core/lib/statemgr/*
         --python-instance-binary=./heron-core/bin/heron-python-instance 
--cpp-instance-binary=./heron-core/bin/heron-cpp-instance
         --metricscache-manager-classpath=./heron-core/lib/metricscachemgr/* 
--metricscache-manager-mode=disabled
         --is-stateful=false 
--checkpoint-manager-classpath=./heron-core/lib/ckptmgr/*:./heron-core/lib/statefulstorage/*:
         --stateful-config-file=./heron-conf/stateful.yaml 
--checkpoint-manager-ram=1073741824
         --health-manager-mode=disabled 
--health-manager-classpath=./heron-core/lib/healthmgr/*
         --shard=$SHARD_ID --server-port=6001 --tmanager-controller-port=6002 
--tmanager-stats-port=6003
         --shell-port=6004 --metrics-manager-port=6005 --scheduler-port=6006 
--metricscache-manager-server-port=6007
         --metricscache-manager-stats-port=6008 --checkpoint-manager-port=6009'
       env:
       - name: HOST
         valueFrom:
           fieldRef:
             apiVersion: v1
             fieldPath: status.podIP
       - name: POD_NAME
         valueFrom:
           fieldRef:
             apiVersion: v1
             fieldPath: metadata.name
       - name: var_one
         value: variable one
       - name: var_three
         value: variable three
       - name: var_two
         value: variable two
       image: apache/heron:testbuild
       imagePullPolicy: IfNotPresent
       name: executor
       ports:
       - containerPort: 5555
         name: tcp-port-kept
         protocol: TCP
       - containerPort: 5556
         name: udp-port-kept
         protocol: UDP
       - containerPort: 6001
         name: server
         protocol: TCP
       - containerPort: 6002
         name: tmanager-ctl
         protocol: TCP
       - containerPort: 6003
         name: tmanager-stats
         protocol: TCP
       - containerPort: 6004
         name: shell-port
         protocol: TCP
       - containerPort: 6005
         name: metrics-mgr
         protocol: TCP
       - containerPort: 6006
         name: scheduler
         protocol: TCP
       - containerPort: 6007
         name: metrics-cache-m
         protocol: TCP
       - containerPort: 6008
         name: metrics-cache-s
         protocol: TCP
       - containerPort: 6009
         name: ckptmgr
         protocol: TCP
       resources:
         limits:
           cpu: "3"
           memory: 4Gi
         requests:
           cpu: "3"
           memory: 4Gi
       securityContext:
         allowPrivilegeEscalation: false
       terminationMessagePath: /dev/termination-log
       terminationMessagePolicy: File
       volumeMounts:
       - mountPath: /shared_volume
         name: shared-volume
       - mountPath: path/to/mount
         name: volumenameofchoice
         subPath: sub/path/to/mount
       - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
         name: kube-api-access-lbslq
         readOnly: true
     - image: alpine
       imagePullPolicy: Always
       name: sidecar-container
       resources: {}
       terminationMessagePath: /dev/termination-log
       terminationMessagePolicy: File
       volumeMounts:
       - mountPath: /shared_volume
         name: shared-volume
       - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
         name: kube-api-access-lbslq
         readOnly: true
     dnsPolicy: ClusterFirst
     enableServiceLinks: true
     hostname: acking-1
     preemptionPolicy: PreemptLowerPriority
     priority: 0
     restartPolicy: Always
     schedulerName: default-scheduler
     securityContext: {}
     serviceAccount: default
     serviceAccountName: default
     subdomain: acking
     terminationGracePeriodSeconds: 0
     tolerations:
     - effect: NoExecute
       key: node.kubernetes.io/not-ready
       operator: Exists
       tolerationSeconds: 10
     - effect: NoExecute
       key: node.kubernetes.io/unreachable
       operator: Exists
       tolerationSeconds: 10
     volumes:
     - emptyDir: {}
       name: shared-volume
     - name: volumenameofchoice
       persistentVolumeClaim:
         claimName: volume-claim-name
     - name: kube-api-access-lbslq
       projected:
         defaultMode: 420
         sources:
         - serviceAccountToken:
             expirationSeconds: 3607
             path: token
         - configMap:
             items:
             - key: ca.crt
               path: ca.crt
             name: kube-root-ca.crt
         - downwardAPI:
             items:
             - fieldRef:
                 apiVersion: v1
                 fieldPath: metadata.namespace
               path: namespace
   status:
     conditions:
     - lastProbeTime: null
       lastTransitionTime: "2021-11-10T22:36:19Z"
       message: '0/1 nodes are available: 1 persistentvolumeclaim 
"volume-claim-name"
         not found.'
       reason: Unschedulable
       status: "False"
       type: PodScheduled
     phase: Pending
     qosClass: Burstable
   ```
   
   </details>
   
   <details><summary>With dynamic PVC: kubectl get pod acking-1 -o 
yaml</summary>
   
   ```yaml
   apiVersion: v1
   kind: Pod
   metadata:
     annotations:
       prometheus.io/port: "8080"
       prometheus.io/scrape: "true"
     creationTimestamp: "2021-11-10T22:39:57Z"
     generateName: acking-
     labels:
       app: heron
       controller-revision-hash: acking-59fdd8c858
       statefulset.kubernetes.io/pod-name: acking-1
       topology: acking
     name: acking-1
     namespace: default
     ownerReferences:
     - apiVersion: apps/v1
       blockOwnerDeletion: true
       controller: true
       kind: StatefulSet
       name: acking
       uid: dc5ae063-672f-4b1c-98cd-0936cd02a1b0
     resourceVersion: "1060"
     uid: cbe65fc2-cddf-496d-b633-68fad946c9b1
   spec:
     containers:
     - command:
       - sh
       - -c
       - './heron-core/bin/heron-downloader-config kubernetes && 
./heron-core/bin/heron-downloader
         
distributedlog://zookeeper:2181/heronbkdl/acking-saad-tag-0--4583400108540459301.tar.gz
         . && SHARD_ID=${POD_NAME##*-} && echo shardId=${SHARD_ID} && 
./heron-core/bin/heron-executor
         --topology-name=acking 
--topology-id=acking1f62dd17-33f9-4a72-98e7-daca680e9f16
         --topology-defn-file=acking.defn 
--state-manager-connection=zookeeper:2181 --state-manager-root=/heron
         --state-manager-config-file=./heron-conf/statemgr.yaml 
--tmanager-binary=./heron-core/bin/heron-tmanager
         --stmgr-binary=./heron-core/bin/heron-stmgr 
--metrics-manager-classpath=./heron-core/lib/metricsmgr/*
         
--instance-jvm-opts="LVhYOitIZWFwRHVtcE9uT3V0T2ZNZW1vcnlFcnJvcg(61)(61)" 
--classpath=heron-api-examples.jar
         --heron-internals-config-file=./heron-conf/heron_internals.yaml 
--override-config-file=./heron-conf/override.yaml
         --component-ram-map=exclaim1:1073741824,word:1073741824 
--component-jvm-opts=""
         --pkg-type=jar --topology-binary-file=heron-api-examples.jar 
--heron-java-home=$JAVA_HOME
         --heron-shell-binary=./heron-core/bin/heron-shell --cluster=kubernetes 
--role=saad
         --environment=default --instance-classpath=./heron-core/lib/instance/* 
--metrics-sinks-config-file=./heron-conf/metrics_sinks.yaml
         
--scheduler-classpath=./heron-core/lib/scheduler/*:./heron-core/lib/packing/*:./heron-core/lib/statemgr/*
         --python-instance-binary=./heron-core/bin/heron-python-instance 
--cpp-instance-binary=./heron-core/bin/heron-cpp-instance
         --metricscache-manager-classpath=./heron-core/lib/metricscachemgr/* 
--metricscache-manager-mode=disabled
         --is-stateful=false 
--checkpoint-manager-classpath=./heron-core/lib/ckptmgr/*:./heron-core/lib/statefulstorage/*:
         --stateful-config-file=./heron-conf/stateful.yaml 
--checkpoint-manager-ram=1073741824
         --health-manager-mode=disabled 
--health-manager-classpath=./heron-core/lib/healthmgr/*
         --shard=$SHARD_ID --server-port=6001 --tmanager-controller-port=6002 
--tmanager-stats-port=6003
         --shell-port=6004 --metrics-manager-port=6005 --scheduler-port=6006 
--metricscache-manager-server-port=6007
         --metricscache-manager-stats-port=6008 --checkpoint-manager-port=6009'
       env:
       - name: HOST
         valueFrom:
           fieldRef:
             apiVersion: v1
             fieldPath: status.podIP
       - name: POD_NAME
         valueFrom:
           fieldRef:
             apiVersion: v1
             fieldPath: metadata.name
       - name: var_one
         value: variable one
       - name: var_three
         value: variable three
       - name: var_two
         value: variable two
       image: apache/heron:testbuild
       imagePullPolicy: IfNotPresent
       name: executor
       ports:
       - containerPort: 5555
         name: tcp-port-kept
         protocol: TCP
       - containerPort: 5556
         name: udp-port-kept
         protocol: UDP
       - containerPort: 6001
         name: server
         protocol: TCP
       - containerPort: 6002
         name: tmanager-ctl
         protocol: TCP
       - containerPort: 6003
         name: tmanager-stats
         protocol: TCP
       - containerPort: 6004
         name: shell-port
         protocol: TCP
       - containerPort: 6005
         name: metrics-mgr
         protocol: TCP
       - containerPort: 6006
         name: scheduler
         protocol: TCP
       - containerPort: 6007
         name: metrics-cache-m
         protocol: TCP
       - containerPort: 6008
         name: metrics-cache-s
         protocol: TCP
       - containerPort: 6009
         name: ckptmgr
         protocol: TCP
       resources:
         limits:
           cpu: "3"
           memory: 4Gi
         requests:
           cpu: "3"
           memory: 4Gi
       securityContext:
         allowPrivilegeEscalation: false
       terminationMessagePath: /dev/termination-log
       terminationMessagePolicy: File
       volumeMounts:
       - mountPath: /shared_volume
         name: shared-volume
       - mountPath: path/to/mount
         name: volumenameofchoice
         subPath: sub/path/to/mount
       - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
         name: kube-api-access-njblm
         readOnly: true
     - image: alpine
       imagePullPolicy: Always
       name: sidecar-container
       resources: {}
       terminationMessagePath: /dev/termination-log
       terminationMessagePolicy: File
       volumeMounts:
       - mountPath: /shared_volume
         name: shared-volume
       - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
         name: kube-api-access-njblm
         readOnly: true
     dnsPolicy: ClusterFirst
     enableServiceLinks: true
     hostname: acking-1
     preemptionPolicy: PreemptLowerPriority
     priority: 0
     restartPolicy: Always
     schedulerName: default-scheduler
     securityContext: {}
     serviceAccount: default
     serviceAccountName: default
     subdomain: acking
     terminationGracePeriodSeconds: 0
     tolerations:
     - effect: NoExecute
       key: node.kubernetes.io/not-ready
       operator: Exists
       tolerationSeconds: 10
     - effect: NoExecute
       key: node.kubernetes.io/unreachable
       operator: Exists
       tolerationSeconds: 10
     volumes:
     - emptyDir: {}
       name: shared-volume
     - name: volumenameofchoice
       persistentVolumeClaim:
         claimName: ondemand-acking-volumenameofchoice
     - name: kube-api-access-njblm
       projected:
         defaultMode: 420
         sources:
         - serviceAccountToken:
             expirationSeconds: 3607
             path: token
         - configMap:
             items:
             - key: ca.crt
               path: ca.crt
             name: kube-root-ca.crt
         - downwardAPI:
             items:
             - fieldRef:
                 apiVersion: v1
                 fieldPath: metadata.namespace
               path: namespace
   status:
     conditions:
     - lastProbeTime: null
       lastTransitionTime: "2021-11-10T22:39:57Z"
       message: '0/1 nodes are available: 1 pod has unbound immediate 
PersistentVolumeClaims.'
       reason: Unschedulable
       status: "False"
       type: PodScheduled
     phase: Pending
     qosClass: Burstable
   ```
   
   </details>
   
   <details><summary>kubectl get persistentvolumeclaims 
ondemand-acking-volumenameofchoice -o yaml</summary>
   
   ```yaml
   apiVersion: v1
   kind: PersistentVolumeClaim
   metadata:
     creationTimestamp: "2021-11-10T22:39:57Z"
     finalizers:
     - kubernetes.io/pvc-protection
     labels:
       onDemand: "true"
       topology: acking
     name: ondemand-acking-volumenameofchoice
     namespace: default
     resourceVersion: "1048"
     uid: 34a9bb66-db51-4abe-bc65-a8b45eef1593
   spec:
     accessModes:
     - ReadWriteOnce
     - ReadOnlyMany
     resources:
       requests:
         storage: 555Gi
     storageClassName: storage-class-name
     volumeMode: Block
     volumeName: volumenameofchoice
   status:
     phase: Pending
   ```
   
   </details>
   


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to