GitHub user duncaan added a comment to the discussion: Issue with TLS after upgrading from 2.8.x

I'm using the latest Apache Pulsar Helm chart. I've tried setting `brokerClientTrustCertsFilePath` and `tlsTrustCertsFilePath` on the toolset pod, but it didn't seem to make a difference. I also pointed `tlsTrustCertsFilePath` at the CA certificate provided by the SSL certificate provider. I'm really unsure where to go from here.
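To be concrete, the toolset change was along these lines (just a sketch; the CA mount path and file name below are placeholders for wherever the CA actually lives in the toolset pod):
```
toolset:
  configData:
    # Both keys point at the CA bundle from the certificate provider.
    # The path below is a placeholder; substitute wherever the CA file
    # is actually mounted inside the toolset pod.
    tlsTrustCertsFilePath: "/pulsar/certs/ca/ca.crt"
    brokerClientTrustCertsFilePath: "/pulsar/certs/ca/ca.crt"
```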
Here's my helm values:
```

namespace: ""
namespaceCreate: false
nameOverride: pulsar
## clusterDomain as defined for your k8s cluster
clusterDomain: cluster.local
###
### Global Settings
###

## Set to true on install
initialize: false
## Set cluster name
# clusterName:

## add custom labels to components of cluster
# labels:
#   environment: dev
#   customer: apache


metadataPrefix: ""

tcpPrefix: "" # For Istio this will be "tcp-"
tlsPrefix: "" # For Istio this will be "tls-"

## Deprecated in favor of using `volumes.persistence`
persistence: true
## Volume settings
volumes:
  persistence: true
  # configure the components to use local persistent volume
  # the local provisioner should be installed prior to enabling local persistent volume
  local_storage: false
## RBAC
##
## Configure settings related to RBAC such as limiting broker access to single
## namespace or enabling PSP
rbac:
  enabled: false
  psp: false
  limit_to_namespace: false
affinity:
  anti_affinity: true
  # Set the anti affinity type. Valid values:
  # requiredDuringSchedulingIgnoredDuringExecution - rules must be met for pod to be scheduled (hard); requires at least one node per replica
  # preferredDuringSchedulingIgnoredDuringExecution - scheduler will try to enforce but not guarantee
  type: requiredDuringSchedulingIgnoredDuringExecution
kube-prometheus-stack:
  enabled: false
  prometheusOperator:
    enabled: false
  grafana:
    enabled: false
  alertmanager:
    enabled: false
  prometheus:
    enabled: false
## Components
##
## Control what components of Apache Pulsar to deploy for the cluster
components:
  # zookeeper
  zookeeper: true
  # bookkeeper
  bookkeeper: true
  # Bookkeeper Autorecovery
  autorecovery: true
  # broker
  broker: true
  # functions
  functions: true
  # proxy
  proxy: true
  # toolset
  toolset: true
  # pulsar manager
  pulsar_manager: true
# Disable in-cluster prometheus, grafana, alert-manager, node_exporter
monitoring:
  # monitoring - prometheus
  prometheus: false
  # monitoring - grafana
  grafana: false
  # monitoring - node_exporter
  node_exporter: false
  # alerting - alert-manager
  alert_manager: false

## Images
##
## Control what images to use for each component
images:
  zookeeper:
    tag: 3.0.7
  bookie:
    tag: 3.0.7
  autorecovery:
    tag: 3.0.7
  broker:
    tag: 3.0.7
  proxy:
    tag: 3.0.7
  functions:
    tag: 3.0.7
  pulsar_manager:
    tag: v0.4.0
    hasCommand: false
## TLS
## templates/tls-certs.yaml
##
## The chart is using cert-manager for provisioning TLS certs for
## brokers and proxies.
tls:
  enabled: true
  ca_suffix: ca-tls
  # common settings for generating certs
  common:
    # 365 days
    duration: 8760h0m0s
    # 30 days
    renewBefore: 720h0m0s
    organization:
      - pulsar
    keySize: 4096
    keyAlgorithm: rsa
    keyEncoding: pkcs8
  # settings for generating certs for proxy
  proxy:
    enabled: true
    cert_name: tls
  # settings for generating certs for broker
  broker:
    enabled: true
    cert_name: tls-broker
  # settings for generating certs for bookies
  bookie:
    enabled: true
    cert_name: tls-bookie
  # settings for generating certs for zookeeper
  zookeeper:
    enabled: true
    cert_name: tls-zookeeper
  # settings for generating certs for recovery
  autorecovery:
    cert_name: tls-recovery
  # settings for generating certs for toolset
  toolset:
    cert_name: tls-toolset
# Enable or disable broker authentication and authorization.
auth:
  authentication:
    enabled: true
    provider: "jwt"
    jwt:
      # Enable JWT authentication
      # If the token is generated by a secret key, set usingSecretKey to true.
      # If the token is generated by a private key, set usingSecretKey to false.
      usingSecretKey: false
  authorization:
    enabled: true
  superUsers:
    # broker to broker communication
    broker: "broker-admin"
    # proxy to broker communication
    proxy: "proxy-admin"
    # pulsar-admin client to broker/proxy communication
    client: "admin"
    manager: "admin"
######################################################################
# External dependencies
######################################################################

## cert-manager
## templates/tls-cert-issuer.yaml
##
## Cert manager is used for automatically provisioning TLS certificates
## for components within a Pulsar cluster
certs:
  internal_issuer:
    apiVersion: cert-manager.io/v1
    enabled: true
    component: internal-cert-issuer
    type: selfsigning
    # 365 days
    duration: 8760h0m0s
    # 30 days
    renewBefore: 720h0m0s
  issuers:
    selfsigning:
######################################################################
# Below are settings for each component
######################################################################

## Pulsar: Zookeeper cluster
## templates/zookeeper-statefulset.yaml
##
zookeeper:
  # use a component name that matches your grafana configuration
  # so the metrics are correctly rendered in grafana dashboard
  component: zookeeper
  # the number of zookeeper servers to run. It should be an odd number larger than or equal to 3.
  replicaCount: 3
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: OrderedReady
  # If using Prometheus-Operator enable this PodMonitor to discover zookeeper scrape targets
  # Prometheus-Operator does not add scrape targets based on k8s annotations
  podMonitor:
    enabled: false
    interval: 10s
    scrapeTimeout: 10s
  # True includes annotation for statefulset that contains hash of corresponding configmap, which will cause pods to restart on configmap change
  restartPodsOnConfigMapChange: false
  ports:
    http: 8000
    client: 2181
    clientTls: 2281
    follower: 2888
    leaderElection: 3888
  # nodeSelector:
  # cloud.google.com/gke-nodepool: default-pool
  probe:
    liveness:
      enabled: true
      failureThreshold: 10
      initialDelaySeconds: 20
      periodSeconds: 30
      timeoutSeconds: 30
    readiness:
      enabled: true
      failureThreshold: 10
      initialDelaySeconds: 20
      periodSeconds: 30
      timeoutSeconds: 30
    startup:
      enabled: false
      failureThreshold: 30
      initialDelaySeconds: 20
      periodSeconds: 30
      timeoutSeconds: 30
  affinity:
    anti_affinity: true
    # Set the anti affinity type. Valid values:
    # requiredDuringSchedulingIgnoredDuringExecution - rules must be met for pod to be scheduled (hard); requires at least one node per replica
    # preferredDuringSchedulingIgnoredDuringExecution - scheduler will try to enforce but not guarantee
    type: requiredDuringSchedulingIgnoredDuringExecution
  annotations:
    prometheus.io/scrape: "true"
    prometheus.io/port: "8000"
  tolerations:
    - key: "service"
      operator: "Equal"
      value: "pulsar"
      effect: "NoSchedule"
  nodeSelector:
    service: "pulsar"
  gracePeriod: 30
  resources:
    requests:
      memory: 256Mi
      cpu: 0.1
  # extraVolumes and extraVolumeMounts allows you to mount other volumes
  # Example Use Case: mount ssl certificates
  # extraVolumes:
  #   - name: ca-certs
  #     secret:
  #       defaultMode: 420
  #       secretName: ca-certs
  # extraVolumeMounts:
  #   - mountPath: /pulsar/custom/log4j2.yaml
  #     name: logger-config
  #     subPath: log4j2.yaml
  extraVolumes: []
  extraVolumeMounts: []
  # Ensures 2.10.0 non-root docker image works correctly.
  securityContext:
    fsGroup: 0
    fsGroupChangePolicy: "OnRootMismatch"
  volumes:
    # use a persistent volume or emptyDir
    persistence: true
    data:
      name: data
      size: 20Gi
      local_storage: false
      ## If you already have an existing storage class and want to reuse it, you can specify its name with the option below
      ##
      storageClassName: pulsar-standard
      #
      ## Instead if you want to create a new storage class define it below
      ## If left undefined no storage class will be defined along with PVC
      ##
      # storageClass:
      # type: pd-ssd
      # fsType: xfs
      # provisioner: kubernetes.io/gce-pd
    useSingleCommonVolume: true
  ## Zookeeper configmap
  ## templates/zookeeper-configmap.yaml
  ##
  configData:
    PULSAR_MEM: >
      -Xms64m -Xmx128m

    PULSAR_GC: >
      -XX:+UseG1GC -XX:MaxGCPauseMillis=10 -Dcom.sun.management.jmxremote
      -Djute.maxbuffer=10485760 -XX:+ParallelRefProcEnabled
      -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:+DisableExplicitGC
      -XX:+ExitOnOutOfMemoryError -XX:+PerfDisableSharedMem

  ## Add a custom command to the start up process of the zookeeper pods (e.g. update-ca-certificates, jvm commands, etc)
  additionalCommand:
  ## Zookeeper service
  ## templates/zookeeper-service.yaml
  ##
  service:
    annotations:
      service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
  ## Zookeeper PodDisruptionBudget
  ## templates/zookeeper-pdb.yaml
  ##
  pdb:
    usePolicy: true
    maxUnavailable: 1
  customLoggingEnabled: true
## Pulsar: Bookkeeper cluster
## templates/bookkeeper-statefulset.yaml
##
bookkeeper:
  # use a component name that matches your grafana configuration
  # so the metrics are correctly rendered in grafana dashboard
  component: bookie
  ## BookKeeper Cluster Initialize
  ## templates/bookkeeper-cluster-initialize.yaml
  metadata:
    ## Set the resources used for running `bin/bookkeeper shell initnewcluster`
    ##
    resources:
    # requests:
    # memory: 4Gi
    # cpu: 2
  replicaCount: 4
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: Parallel
  # If using Prometheus-Operator enable this PodMonitor to discover bookie scrape targets
  # Prometheus-Operator does not add scrape targets based on k8s annotations
  podMonitor:
    enabled: false
    interval: 10s
    scrapeTimeout: 10s
  # True includes annotation for statefulset that contains hash of corresponding configmap, which will cause pods to restart on configmap change
  restartPodsOnConfigMapChange: false
  ports:
    http: 8000
    bookie: 3181
  # nodeSelector:
  # cloud.google.com/gke-nodepool: default-pool
  probe:
    liveness:
      enabled: true
      failureThreshold: 60
      initialDelaySeconds: 10
      periodSeconds: 30
      timeoutSeconds: 5
    readiness:
      enabled: true
      failureThreshold: 60
      initialDelaySeconds: 10
      periodSeconds: 30
      timeoutSeconds: 5
    startup:
      enabled: false
      failureThreshold: 30
      initialDelaySeconds: 60
      periodSeconds: 30
      timeoutSeconds: 5
  affinity:
    anti_affinity: true
    # Set the anti affinity type. Valid values:
    # requiredDuringSchedulingIgnoredDuringExecution - rules must be met for pod to be scheduled (hard); requires at least one node per replica
    # preferredDuringSchedulingIgnoredDuringExecution - scheduler will try to enforce but not guarantee
    type: requiredDuringSchedulingIgnoredDuringExecution
  annotations: {}
  tolerations:
    - key: "service"
      operator: "Equal"
      value: "pulsar"
      effect: "NoSchedule"
  nodeSelector:
    service: "pulsar"
  gracePeriod: 30
  resources:
    requests:
      memory: 512Mi
      cpu: 0.2
  # extraVolumes and extraVolumeMounts allows you to mount other volumes
  # Example Use Case: mount ssl certificates
  # extraVolumes:
  #   - name: ca-certs
  #     secret:
  #       defaultMode: 420
  #       secretName: ca-certs
  # extraVolumeMounts:
  #   - name: ca-certs
  #     mountPath: /certs
  #     readOnly: true
  extraVolumes: []
  extraVolumeMounts: []
  # Ensures 2.10.0 non-root docker image works correctly.
  securityContext:
    fsGroup: 0
    fsGroupChangePolicy: "OnRootMismatch"
  volumes:
    # use a persistent volume or emptyDir
    persistence: true
    journal:
      name: journal
      size: 10Gi
      local_storage: false
      ## If you already have an existing storage class and want to reuse it, you can specify its name with the option below
      ##
      # storageClassName: existent-storage-class
      #
      ## Instead if you want to create a new storage class define it below
      ## If left undefined no storage class will be defined along with PVC
      ##
      # storageClass:
      # type: pd-ssd
      # fsType: xfs
      # provisioner: kubernetes.io/gce-pd
      useMultiVolumes: false
      multiVolumes:
        - name: journal0
          size: 10Gi
          # storageClassName: existent-storage-class
          mountPath: /pulsar/data/bookkeeper/journal0
        - name: journal1
          size: 10Gi
          # storageClassName: existent-storage-class
          mountPath: /pulsar/data/bookkeeper/journal1
      storageClassName: pulsar-standard
    ledgers:
      name: ledgers
      size: 50Gi
      local_storage: false
      # storageClassName:
      # storageClass:
      # ...
      useMultiVolumes: false
      multiVolumes:
        - name: ledgers0
          size: 10Gi
          # storageClassName: existent-storage-class
          mountPath: /pulsar/data/bookkeeper/ledgers0
        - name: ledgers1
          size: 10Gi
          # storageClassName: existent-storage-class
          mountPath: /pulsar/data/bookkeeper/ledgers1
      storageClassName: pulsar-standard
    ## use a single common volume for both journal and ledgers
    useSingleCommonVolume: false
    common:
      name: common
      size: 60Gi
      local_storage: true
      # storageClassName:
      # storageClass: ## this is common too
      # ...
  ## Bookkeeper configmap
  ## templates/bookkeeper-configmap.yaml
  ##
  configData:
    # we use `bin/pulsar` for starting bookie daemons
    PULSAR_MEM: >
      -Xms128m -Xmx256m -XX:MaxDirectMemorySize=256m

    PULSAR_GC: >
      -XX:+UseG1GC -XX:MaxGCPauseMillis=10 -XX:+ParallelRefProcEnabled
      -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=4
      -XX:ConcGCThreads=4 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC
      -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError -XX:+PerfDisableSharedMem -Xlog:gc*
      -Xlog:gc::utctime -Xlog:safepoint -Xlog:gc+heap=trace -verbosegc

    # configure the memory settings based on jvm memory settings
    dbStorage_writeCacheMaxSizeMb: "32"
    dbStorage_readAheadCacheMaxSizeMb: "32"
    dbStorage_rocksDB_writeBufferSizeMB: "8"
    dbStorage_rocksDB_blockCacheSize: "8388608"
    PULSAR_LOG_CONF: "/pulsar/custom/log4j2.yaml"
    numAddWorkerThreads: "8" # Number of threads that should handle write requests. If zero, the writes would be handled by netty threads directly.
  ## Add a custom command to the start up process of the bookie pods (e.g. update-ca-certificates, jvm commands, etc)
  additionalCommand:
  ## Bookkeeper Service
  ## templates/bookkeeper-service.yaml
  ##
  service:
    spec:
      publishNotReadyAddresses: true
  ## Bookkeeper PodDisruptionBudget
  ## templates/bookkeeper-pdb.yaml
  ##
  pdb:
    usePolicy: true
    maxUnavailable: 1
  customLoggingEnabled: true
## Pulsar: Bookkeeper AutoRecovery
## templates/autorecovery-statefulset.yaml
##
autorecovery:
  # use a component name that matches your grafana configuration
  # so the metrics are correctly rendered in grafana dashboard
  component: recovery
  replicaCount: 1
  # If using Prometheus-Operator enable this PodMonitor to discover autorecovery scrape targets
  # Prometheus-Operator does not add scrape targets based on k8s annotations
  podMonitor:
    enabled: false
    interval: 10s
    scrapeTimeout: 10s
  # True includes annotation for statefulset that contains hash of corresponding configmap, which will cause pods to restart on configmap change
  restartPodsOnConfigMapChange: false
  ports:
    http: 8000
  # nodeSelector:
  # cloud.google.com/gke-nodepool: default-pool
  affinity:
    anti_affinity: true
    # Set the anti affinity type. Valid values:
    # requiredDuringSchedulingIgnoredDuringExecution - rules must be met for pod to be scheduled (hard); requires at least one node per replica
    # preferredDuringSchedulingIgnoredDuringExecution - scheduler will try to enforce but not guarantee
    type: requiredDuringSchedulingIgnoredDuringExecution
  annotations: {}
  # tolerations: []
  gracePeriod: 30
  resources:
    requests:
      memory: 1Gi
      cpu: 0.05
    limits:
      memory: 2Gi
  ## Bookkeeper auto-recovery configmap
  ## templates/autorecovery-configmap.yaml
  ##
  configData:
    BOOKIE_MEM: >-
      -Xms1G -Xmx1G
    PULSAR_PREFIX_useV2WireProtocol: "true"
  tolerations:
    - key: "service"
      operator: "Equal"
      value: "pulsar"
      effect: "NoSchedule"
  nodeSelector:
    service: "pulsar"
## Pulsar Zookeeper metadata. The metadata will be deployed as
## soon as the last zookeeper node is reachable. The deployment
## of other components that depends on zookeeper, such as the
## bookkeeper nodes, broker nodes, etc will only start to be
## deployed when the zookeeper cluster is ready and with the
## metadata deployed
pulsar_metadata:
  component: pulsar-init
  image:
    # the image used for running `pulsar-cluster-initialize` job
    tag: 3.0.7
    pullPolicy: IfNotPresent
  ## set an existing configuration store
  # configurationStore:
  configurationStoreMetadataPrefix: ""
  configurationStorePort: 2181
  ## optional, you can provide your own zookeeper metadata store for other components
  # to use this, you should explicitly set components.zookeeper to false
  #
  # userProvidedZookeepers: "zk01.example.com:2181,zk02.example.com:2181"
# Can be used to run extra commands in the initialization jobs e.g. to quit istio sidecars etc.
extraInitCommand: ""
## Pulsar: Broker cluster
## templates/broker-statefulset.yaml
##
broker:
  # use a component name that matches your grafana configuration
  # so the metrics are correctly rendered in grafana dashboard
  component: broker
  replicaCount: 3
  autoscaling:
    enabled: false
    minReplicas: 1
    maxReplicas: 3
    metrics: ~
  # If using Prometheus-Operator enable this PodMonitor to discover broker scrape targets
  # Prometheus-Operator does not add scrape targets based on k8s annotations
  podMonitor:
    enabled: false
    interval: 10s
    scrapeTimeout: 10s
  # True includes annotation for statefulset that contains hash of corresponding configmap, which will cause pods to restart on configmap change
  restartPodsOnConfigMapChange: false
  ports:
    http: 8080
    https: 8443
    pulsar: 6650
    pulsarssl: 6651
  # nodeSelector:
  # cloud.google.com/gke-nodepool: default-pool
  probe:
    liveness:
      enabled: true
      failureThreshold: 10
      initialDelaySeconds: 30
      periodSeconds: 10
      timeoutSeconds: 5
    readiness:
      enabled: true
      failureThreshold: 10
      initialDelaySeconds: 30
      periodSeconds: 10
      timeoutSeconds: 5
    startup:
      enabled: false
      failureThreshold: 30
      initialDelaySeconds: 60
      periodSeconds: 10
      timeoutSeconds: 5
  affinity:
    anti_affinity: true
    # Set the anti affinity type. Valid values:
    # requiredDuringSchedulingIgnoredDuringExecution - rules must be met for pod to be scheduled (hard); requires at least one node per replica
    # preferredDuringSchedulingIgnoredDuringExecution - scheduler will try to enforce but not guarantee
    type: preferredDuringSchedulingIgnoredDuringExecution
  annotations: {}
  tolerations:
    - key: "service"
      operator: "Equal"
      value: "pulsar"
      effect: "NoSchedule"
  nodeSelector:
    service: "pulsar"
  gracePeriod: 30
  resources:
    requests:
      memory: "3Gi"
      cpu: 0.2
    limits:
      memory: "3Gi"
  # extraVolumes and extraVolumeMounts allows you to mount other volumes
  # Example Use Case: mount ssl certificates
  # extraVolumes:
  #   - name: ca-certs
  #     secret:
  #       defaultMode: 420
  #       secretName: ca-certs
  # extraVolumeMounts:
  #   - name: ca-certs
  #     mountPath: /certs
  #     readOnly: true
  extraVolumes: []
  extraVolumeMounts: []
  extreEnvs: []
  #    - name: POD_NAME
  #      valueFrom:
  #        fieldRef:
  #          apiVersion: v1
  #          fieldPath: metadata.name
  ## Broker configmap
  ## templates/broker-configmap.yaml
  ##
  configData:
    PULSAR_MEM: >
      -Xms1G -Xmx1G -XX:MaxDirectMemorySize=1G

    PULSAR_GC: >
      -XX:+UseG1GC -XX:MaxGCPauseMillis=10
      -Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024
      -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions
      -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=4 -XX:ConcGCThreads=4
      -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:+ExitOnOutOfMemoryError
      -XX:+PerfDisableSharedMem

    managedLedgerDefaultEnsembleSize: "2"
    managedLedgerDefaultWriteQuorum: "2"
    managedLedgerDefaultAckQuorum: "2"
    defaultNumberOfNamespaceBundles: "9" # should be a multiple of the number of brokers we are running
    PULSAR_PREFIX_s3ManagedLedgerOffloadRole: #redacted
    PULSAR_PREFIX_s3ManagedLedgerOffloadRoleSessionName: #redacted
    PULSAR_PREFIX_s3ManagedLedgerOffloadBucket: #redacted
    allowAutoTopicCreation: "false"
    brokerClientTlsEnabled: "true"
    brokerDeleteInactiveTopicsEnabled: "false"
    loadBalancerAutoBundleSplitEnabled: "false" # Expected default is false, however the Docker image has this set to true and it can impact stability; need to configure number of bundles for a namespace via pulsar-admin
    loadBalancerOverrideBrokerNicSpeedGbps: "2.5"
    managedLedgerCacheSizeMB: "1024" # This is the size of the on-broker managed ledger cache - 1GB
    managedLedgerCacheEvictionTimeThresholdMillis: "60000" # default is 1000
    managedLedgerNumWorkerThreads: "6"
    maxUnackedMessagesPerConsumer: "0" # Zero here means no limit
    maxUnackedMessagesPerSubscription: "0" # Zero here means no limit
    PULSAR_LOG_CONF: "/pulsar/custom/log4j2.yaml"
    replicationConnectionsPerBroker: "6"
    replicationTlsEnabled: "true"
  ## Add a custom command to the start up process of the broker pods (e.g. update-ca-certificates, jvm commands, etc)
  additionalCommand:
  ## Broker service
  ## templates/broker-service.yaml
  ##
  service:
    annotations: {}
  ## Broker PodDisruptionBudget
  ## templates/broker-pdb.yaml
  ##
  pdb:
    usePolicy: true
    maxUnavailable: 1
  ### Broker service account
  ## templates/broker-service-account.yaml
  service_account:
    annotations: {}
  hpa:
    enabled: false
  customLoggingEnabled: true

functions:
  component: functions-worker

proxy:
  component: proxy
  replicaCount: 3
  podMonitor:
    enabled: false
    interval: 10s
    scrapeTimeout: 10s
  restartPodsOnConfigMapChange: false
  probe:
    liveness:
      enabled: true
      failureThreshold: 10
      initialDelaySeconds: 30
      periodSeconds: 10
      timeoutSeconds: 5
    readiness:
      enabled: true
      failureThreshold: 10
      initialDelaySeconds: 30
      periodSeconds: 10
      timeoutSeconds: 5
    startup:
      enabled: false
      failureThreshold: 30
      initialDelaySeconds: 60
      periodSeconds: 10
      timeoutSeconds: 5
  affinity:
    anti_affinity: true
    type: requiredDuringSchedulingIgnoredDuringExecution
  annotations: {}
  tolerations:
    - key: "service"
      operator: "Equal"
      value: "pulsar"
      effect: "NoSchedule"
  nodeSelector:
    service: "pulsar"
  gracePeriod: 30
  resources:
    requests:
      memory: 128Mi
      cpu: 0.2
  configData:
    PULSAR_MEM: >
      -Xms64m -Xmx64m -XX:MaxDirectMemorySize=64m

    PULSAR_GC: >
      -XX:+UseG1GC -XX:MaxGCPauseMillis=10
      -Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024
      -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions
      -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=4 -XX:ConcGCThreads=4
      -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB
      -XX:+ExitOnOutOfMemoryError -XX:+PerfDisableSharedMem

    httpNumThreads: "8"
    PULSAR_PREFIX_authenticateMetricsEndpoint: "false"
  ## Add a custom command to the start up process of the proxy pods (e.g. update-ca-certificates, jvm commands, etc)
  additionalCommand:
  ## Proxy service
  ## templates/proxy-service.yaml
  ##
  ports:
    http: 80
    https: 443
    pulsar: 6650
    pulsarssl: 6651
  service:
    annotations:
      external-dns-enabled: "true"
      service.beta.kubernetes.io/aws-load-balancer-scheme: "internal"
      service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    type: LoadBalancer
  proxyTrustedCertSecret: "pulsar-staging-tls"

toolset:
  component: toolset
  useProxy: true
  replicaCount: 1
  restartPodsOnConfigMapChange: false
  annotations: {}
  tolerations: []
  gracePeriod: 30
  resources:
    requests:
      memory: 256Mi
      cpu: 0.1
  configData:
    PULSAR_MEM: >
      -Xms64M -Xmx128M -XX:MaxDirectMemorySize=128M

pulsar_manager:
  component: pulsar-manager
  replicaCount: 1
  restartPodsOnConfigMapChange: false
  gracePeriod: 30
  resources:
    requests:
      memory: 250Mi
      cpu: 0.1
  configData:
    REDIRECT_HOST: "http://127.0.0.1"
    REDIRECT_PORT: "9527"
    DRIVER_CLASS_NAME: org.postgresql.Driver
    URL: # redacted
    LOG_LEVEL: ERROR
  service:
    type: ClusterIP
    port: 9527
    targetPort: 9527
    annotations: {}
  ingress:
    enabled: false
  existingSecretName:
  admin:
    existingSecret: pulsar-manager-secret
  dbSecretName: "pulsar-postgresql-app"
  databaseCredentials:
    user: username
    password: password
job:
  ttl:
    enabled: false
    secondsAfterFinished: 3600
fullnameOverride: pulsar-staging
```

GitHub link: https://github.com/apache/pulsar/discussions/23408#discussioncomment-10897139
