yceoshda opened a new issue #47:
URL: https://github.com/apache/pulsar-helm-chart/issues/47
While trying to upgrade our 2.5.0 deployment to 2.6.0, we encountered an
issue.
**Describe the bug**
When using Helm to upgrade from 2.5.0 to 2.6.0, the process fails on the
Init Jobs.
**To Reproduce**
Steps to reproduce the behavior:
1. Deploy a 2.5.0 version of Pulsar using Helm
2. Upgrade it to 2.6.0 still using Helm
3. Get the following (nasty/cryptic) error message
```text
Error: UPGRADE FAILED: cannot patch "pulsar-staging-bookie-init" with kind
Job: Job.batch "pulsar-staging-bookie-init" is invalid: spec.template: Invalid
value: core.PodTemplateSpec{ObjectMeta:v1.ObjectMeta{Name:"", GenerateName:"",
Namespace:"", SelfLink:"", UID:"", ResourceVersion:"", Generation:0,
CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0,
loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil),
DeletionGracePeriodSeconds:(*int64)(nil),
Labels:map[string]string{"controller-uid":"1951b64b-f3ff-42cf-ad9b-ed1dea7dfd08",
"job-name":"pulsar-staging-bookie-init"}, Annotations:map[string]string(nil),
OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil),
ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)},
Spec:core.PodSpec{Volumes:[]core.Volume(nil),
InitContainers:[]core.Container{core.Container{Name:"wait-zookeeper-ready",
Image:"apachepulsar/pulsar-all:2.6.0", Command:[]string{"sh", "-c"},
Args:[]string{"until nslookup pulsar-staging-zook
eeper-2.pulsar-staging-zookeeper.pulsar; do\n sleep 3;\ndone;"},
WorkingDir:"", Ports:[]core.ContainerPort(nil),
EnvFrom:[]core.EnvFromSource(nil), Env:[]core.EnvVar(nil),
Resources:core.ResourceRequirements{Limits:core.ResourceList(nil),
Requests:core.ResourceList(nil)}, VolumeMounts:[]core.VolumeMount(nil),
VolumeDevices:[]core.VolumeDevice(nil), LivenessProbe:(*core.Probe)(nil),
ReadinessProbe:(*core.Probe)(nil), StartupProbe:(*core.Probe)(nil),
Lifecycle:(*core.Lifecycle)(nil),
TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File",
ImagePullPolicy:"IfNotPresent", SecurityContext:(*core.SecurityContext)(nil),
Stdin:false, StdinOnce:false, TTY:false}},
Containers:[]core.Container{core.Container{Name:"pulsar-staging-bookie-init",
Image:"apachepulsar/pulsar-all:2.6.0", Command:[]string{"sh", "-c"},
Args:[]string{"bin/apply-config-from-env.py conf/bookkeeper.conf;\nif
bin/bookkeeper shell whatisinstanceid; then\n echo \"bookkeeper cluster
already initiali
zed\";\nelse\n bin/bookkeeper shell initnewcluster;\nfi\n"}, WorkingDir:"",
Ports:[]core.ContainerPort(nil),
EnvFrom:[]core.EnvFromSource{core.EnvFromSource{Prefix:"",
ConfigMapRef:(*core.ConfigMapEnvSource)(0xc010895aa0),
SecretRef:(*core.SecretEnvSource)(nil)}}, Env:[]core.EnvVar(nil),
Resources:core.ResourceRequirements{Limits:core.ResourceList(nil),
Requests:core.ResourceList(nil)}, VolumeMounts:[]core.VolumeMount(nil),
VolumeDevices:[]core.VolumeDevice(nil), LivenessProbe:(*core.Probe)(nil),
ReadinessProbe:(*core.Probe)(nil), StartupProbe:(*core.Probe)(nil),
Lifecycle:(*core.Lifecycle)(nil),
TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File",
ImagePullPolicy:"IfNotPresent", SecurityContext:(*core.SecurityContext)(nil),
Stdin:false, StdinOnce:false, TTY:false}},
EphemeralContainers:[]core.EphemeralContainer(nil), RestartPolicy:"Never",
TerminationGracePeriodSeconds:(*int64)(0xc008116c80),
ActiveDeadlineSeconds:(*int64)(nil), DNSPolicy:"ClusterFirst
", NodeSelector:map[string]string(nil), ServiceAccountName:"",
AutomountServiceAccountToken:(*bool)(nil), NodeName:"",
SecurityContext:(*core.PodSecurityContext)(0xc0158c69a0),
ImagePullSecrets:[]core.LocalObjectReference(nil), Hostname:"", Subdomain:"",
Affinity:(*core.Affinity)(nil), SchedulerName:"default-scheduler",
Tolerations:[]core.Toleration(nil), HostAliases:[]core.HostAlias(nil),
PriorityClassName:"", Priority:(*int32)(nil),
PreemptionPolicy:(*core.PreemptionPolicy)(nil),
DNSConfig:(*core.PodDNSConfig)(nil),
ReadinessGates:[]core.PodReadinessGate(nil), RuntimeClassName:(*string)(nil),
Overhead:core.ResourceList(nil), EnableServiceLinks:(*bool)(nil),
TopologySpreadConstraints:[]core.TopologySpreadConstraint(nil)}}: field is
immutable && cannot patch "pulsar-staging-pulsar-init" with kind Job: Job.batch
"pulsar-staging-pulsar-init" is invalid: spec.template: Invalid value:
core.PodTemplateSpec{ObjectMeta:v1.ObjectMeta{Name:"", GenerateName:"",
Namespace:"", SelfLink:"", UID:
"", ResourceVersion:"", Generation:0,
CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0,
loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil),
DeletionGracePeriodSeconds:(*int64)(nil),
Labels:map[string]string{"controller-uid":"f967eb35-a5ce-4a44-97b2-12d263b2df4c",
"job-name":"pulsar-staging-pulsar-init"}, Annotations:map[string]string(nil),
OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil),
ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)},
Spec:core.PodSpec{Volumes:[]core.Volume(nil),
InitContainers:[]core.Container{core.Container{Name:"wait-zookeeper-ready",
Image:"apachepulsar/pulsar-all:2.6.0", Command:[]string{"sh", "-c"},
Args:[]string{"until nslookup
pulsar-staging-zookeeper-2.pulsar-staging-zookeeper.pulsar; do\n sleep
3;\ndone;"}, WorkingDir:"", Ports:[]core.ContainerPort(nil),
EnvFrom:[]core.EnvFromSource(nil), Env:[]core.EnvVar(nil),
Resources:core.ResourceRequirements{Limits:core.ResourceList(nil),
Requests:core.ResourceList
(nil)}, VolumeMounts:[]core.VolumeMount(nil),
VolumeDevices:[]core.VolumeDevice(nil), LivenessProbe:(*core.Probe)(nil),
ReadinessProbe:(*core.Probe)(nil), StartupProbe:(*core.Probe)(nil),
Lifecycle:(*core.Lifecycle)(nil),
TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File",
ImagePullPolicy:"IfNotPresent", SecurityContext:(*core.SecurityContext)(nil),
Stdin:false, StdinOnce:false, TTY:false},
core.Container{Name:"pulsar-bookkeeper-verify-clusterid",
Image:"apachepulsar/pulsar-all:2.6.0", Command:[]string{"sh", "-c"},
Args:[]string{"bin/apply-config-from-env.py conf/bookkeeper.conf;\nuntil
bin/bookkeeper shell whatisinstanceid; do\n sleep 3;\ndone;\n"},
WorkingDir:"", Ports:[]core.ContainerPort(nil),
EnvFrom:[]core.EnvFromSource{core.EnvFromSource{Prefix:"",
ConfigMapRef:(*core.ConfigMapEnvSource)(0xc015b2bec0),
SecretRef:(*core.SecretEnvSource)(nil)}}, Env:[]core.EnvVar(nil),
Resources:core.ResourceRequirements{Limits:core.ResourceList(nil),
Requests:core.
ResourceList(nil)}, VolumeMounts:[]core.VolumeMount(nil),
VolumeDevices:[]core.VolumeDevice(nil), LivenessProbe:(*core.Probe)(nil),
ReadinessProbe:(*core.Probe)(nil), StartupProbe:(*core.Probe)(nil),
Lifecycle:(*core.Lifecycle)(nil),
TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File",
ImagePullPolicy:"IfNotPresent", SecurityContext:(*core.SecurityContext)(nil),
Stdin:false, StdinOnce:false, TTY:false}},
Containers:[]core.Container{core.Container{Name:"pulsar-staging-pulsar-init",
Image:"apachepulsar/pulsar-all:2.6.0", Command:[]string{"sh", "-c"},
Args:[]string{"\nbin/pulsar initialize-cluster-metadata \\\n --cluster
pulsar-staging \\\n --zookeeper pulsar-staging-zookeeper:2181 \\\n
--configuration-store pulsar-staging-zookeeper:2181 \\\n --web-service-url
http://pulsar-staging-broker.pulsar.svc.cluster.local:8080/ \\\n
--web-service-url-tls
https://pulsar-staging-broker.pulsar.svc.cluster.local:8443/ \\\n
--broker-service-url pulsar://pulsar-stagin
g-broker.pulsar.svc.cluster.local:6650/ \\\n --broker-service-url-tls
pulsar+ssl://pulsar-staging-broker.pulsar.svc.cluster.local:6651/ || true;\n"},
WorkingDir:"", Ports:[]core.ContainerPort(nil),
EnvFrom:[]core.EnvFromSource(nil), Env:[]core.EnvVar(nil),
Resources:core.ResourceRequirements{Limits:core.ResourceList(nil),
Requests:core.ResourceList(nil)}, VolumeMounts:[]core.VolumeMount(nil),
VolumeDevices:[]core.VolumeDevice(nil), LivenessProbe:(*core.Probe)(nil),
ReadinessProbe:(*core.Probe)(nil), StartupProbe:(*core.Probe)(nil),
Lifecycle:(*core.Lifecycle)(nil),
TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File",
ImagePullPolicy:"IfNotPresent", SecurityContext:(*core.SecurityContext)(nil),
Stdin:false, StdinOnce:false, TTY:false}},
EphemeralContainers:[]core.EphemeralContainer(nil), RestartPolicy:"Never",
TerminationGracePeriodSeconds:(*int64)(0xc017ff6e98),
ActiveDeadlineSeconds:(*int64)(nil), DNSPolicy:"ClusterFirst",
NodeSelector:map[string]string(
nil), ServiceAccountName:"", AutomountServiceAccountToken:(*bool)(nil),
NodeName:"", SecurityContext:(*core.PodSecurityContext)(0xc017c5a070),
ImagePullSecrets:[]core.LocalObjectReference(nil), Hostname:"", Subdomain:"",
Affinity:(*core.Affinity)(nil), SchedulerName:"default-scheduler",
Tolerations:[]core.Toleration(nil), HostAliases:[]core.HostAlias(nil),
PriorityClassName:"", Priority:(*int32)(nil),
PreemptionPolicy:(*core.PreemptionPolicy)(nil),
DNSConfig:(*core.PodDNSConfig)(nil),
ReadinessGates:[]core.PodReadinessGate(nil), RuntimeClassName:(*string)(nil),
Overhead:core.ResourceList(nil), EnableServiceLinks:(*bool)(nil),
TopologySpreadConstraints:[]core.TopologySpreadConstraint(nil)}}: field is
immutable
```
**Expected behavior**
Helm upgrade completing successfully.
**Additional context**
After some googling, it seems that changing a container image inside a Job
is not supported (as it turns out, trying to change the image with a command
like `kubectl edit jobs.batch pulsar-pulsar-init` yields the same error message).
Our workaround was to delete the 2 jobs pre-upgrade. As this seems to be a
Kubernetes limitation, the upgrade doc should probably be updated to avoid
confusion.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]