1. Add the Bitnami Helm repository and refresh the local index:

helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update
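
(Optional) Verify the repo is reachable and list the chart versions it serves (output varies with the repo state):

helm search repo bitnami/kafka --versions | head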

2. Pull the Kafka chart:

helm pull bitnami/kafka
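
(Optional) Pin the chart version to reproduce the exact archive used in step 3; adding --untar also extracts it in one step:

helm pull bitnami/kafka --version 18.0.8
helm pull bitnami/kafka --version 18.0.8 --untar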

3. Extract the chart archive:

tar -zxvf kafka-18.0.8.tgz

4. Edit values.yaml

Note the following:

  Replace storageClass: "managed-nfs-storage" with the name of your own NFS dynamic-provisioning StorageClass.
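
As a quick sanity check, confirm that a StorageClass with that name actually exists in your cluster (managed-nfs-storage here is just this tutorial's example name):

kubectl get storageclass managed-nfs-storage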
## @section Global parameters
## Global Docker image parameters
## Please, note that this will override the image parameters, including dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass

## @param global.imageRegistry Global Docker image registry
## @param global.imagePullSecrets Global Docker registry secret names as an array
## @param global.storageClass Global StorageClass for Persistent Volume(s)
##
global:
  imageRegistry: ""
  ## E.g.
  ## imagePullSecrets:
  ##   - myRegistryKeySecretName
  ##
  imagePullSecrets: []
  storageClass: "managed-nfs-storage"
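  ## NOTE (tutorial): instead of editing this file, the same override can be passed at
  ## install time, e.g. (the release name "kafka" and chart directory "./kafka" are assumptions):
  ##       helm install kafka ./kafka --set global.storageClass=managed-nfs-storage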

## @section Common parameters

## @param kubeVersion Override Kubernetes version
##
kubeVersion: ""
## @param nameOverride String to partially override common.names.fullname
##
nameOverride: ""
## @param fullnameOverride String to fully override common.names.fullname
##
fullnameOverride: ""
## @param clusterDomain Default Kubernetes cluster domain
##
clusterDomain: cluster.local
## @param commonLabels Labels to add to all deployed objects
##
commonLabels: {}
## @param commonAnnotations Annotations to add to all deployed objects
##
commonAnnotations: {}
## @param extraDeploy Array of extra objects to deploy with the release
##
extraDeploy: []
## Enable diagnostic mode in the statefulset
##
diagnosticMode:
  ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden)
  ##
  enabled: false
  ## @param diagnosticMode.command Command to override all containers in the statefulset
  ##
  command:
    - sleep
  ## @param diagnosticMode.args Args to override all containers in the statefulset
  ##
  args:
    - infinity

## @section Kafka parameters

## Bitnami Kafka image version
## ref: https://hub.docker.com/r/bitnami/kafka/tags/
## @param image.registry Kafka image registry
## @param image.repository Kafka image repository
## @param image.tag Kafka image tag (immutable tags are recommended)
## @param image.pullPolicy Kafka image pull policy
## @param image.pullSecrets Specify docker-registry secret names as an array
## @param image.debug Specify if debug values should be set
##
image:
  registry: docker.io
  repository: bitnami/kafka
  tag: 3.2.1-debian-11-r4
  ## Specify an imagePullPolicy
  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
  ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images
  ##
  pullPolicy: IfNotPresent
  ## Optionally specify an array of imagePullSecrets.
  ## Secrets must be manually created in the namespace.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ## e.g:
  ## pullSecrets:
  ##   - myRegistryKeySecretName
  ##
  pullSecrets: []
  ## Set to true if you would like to see extra information on logs
  ##
  debug: false
## @param config Configuration file for Kafka. Auto-generated based on other parameters when not specified
## Specify content for server.properties
## NOTE: This will override any KAFKA_CFG_ environment variables (including those set by the chart)
## The server.properties is auto-generated based on other parameters when this parameter is not specified
## e.g:
## config: |-
##   broker.id=-1
##   listeners=PLAINTEXT://:9092
##   advertised.listeners=PLAINTEXT://KAFKA_IP:9092
##   num.network.threads=3
##   num.io.threads=8
##   socket.send.buffer.bytes=102400
##   socket.receive.buffer.bytes=102400
##   socket.request.max.bytes=104857600
##   log.dirs=/bitnami/kafka/data
##   num.partitions=1
##   num.recovery.threads.per.data.dir=1
##   offsets.topic.replication.factor=1
##   transaction.state.log.replication.factor=1
##   transaction.state.log.min.isr=1
##   log.flush.interval.messages=10000
##   log.flush.interval.ms=1000
##   log.retention.hours=168
##   log.retention.bytes=1073741824
##   log.segment.bytes=1073741824
##   log.retention.check.interval.ms=300000
##   zookeeper.connect=ZOOKEEPER_SERVICE_NAME
##   zookeeper.connection.timeout.ms=6000
##   group.initial.rebalance.delay.ms=0
##
config: ""
## @param existingConfigmap ConfigMap with Kafka Configuration
## NOTE: This will override `config` AND any KAFKA_CFG_ environment variables
##
existingConfigmap: ""
## @param log4j An optional log4j.properties file to overwrite the default of the Kafka brokers
## An optional log4j.properties file to overwrite the default of the Kafka brokers
## ref: https://github.com/apache/kafka/blob/trunk/config/log4j.properties
##
log4j: ""
## @param existingLog4jConfigMap The name of an existing ConfigMap containing a log4j.properties file
## The name of an existing ConfigMap containing a log4j.properties file
## NOTE: this will override `log4j`
##
existingLog4jConfigMap: ""
## @param heapOpts Kafka Java Heap size
##
heapOpts: -Xmx1024m -Xms1024m
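## NOTE (tutorial): if you set memory limits under `resources` below, keep the JVM heap
## comfortably below the limit; a sketch assuming a 3Gi container memory limit:
##       heapOpts: -Xmx2048m -Xms2048m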
## @param deleteTopicEnable Switch to enable topic deletion or not
##
deleteTopicEnable: false
## @param autoCreateTopicsEnable Switch to enable auto creation of topics. Enabling auto creation of topics not recommended for production or similar environments
##
autoCreateTopicsEnable: true
## @param logFlushIntervalMessages The number of messages to accept before forcing a flush of data to disk
##
logFlushIntervalMessages: _10000
## @param logFlushIntervalMs The maximum amount of time a message can sit in a log before we force a flush
##
logFlushIntervalMs: 1000
## @param logRetentionBytes A size-based retention policy for logs
##
logRetentionBytes: _1073741824
## @param logRetentionCheckIntervalMs The interval at which log segments are checked to see if they can be deleted
##
logRetentionCheckIntervalMs: 300000
## @param logRetentionHours The minimum age of a log file to be eligible for deletion due to age
##
logRetentionHours: 168
## @param logSegmentBytes The maximum size of a log segment file. When this size is reached a new log segment will be created
##
logSegmentBytes: _1073741824
## @param logsDirs A comma separated list of directories under which to store log files
##
logsDirs: /opt/bitnami/kafka/logs
## @param maxMessageBytes The largest record batch size allowed by Kafka
##
maxMessageBytes: _1000012
## @param defaultReplicationFactor Default replication factors for automatically created topics
##
defaultReplicationFactor: 1
## @param offsetsTopicReplicationFactor The replication factor for the offsets topic
##
offsetsTopicReplicationFactor: 1
## @param transactionStateLogReplicationFactor The replication factor for the transaction topic
##
transactionStateLogReplicationFactor: 1
## @param transactionStateLogMinIsr Overridden min.insync.replicas config for the transaction topic
##
transactionStateLogMinIsr: 1
## @param numIoThreads The number of threads doing disk I/O
##
numIoThreads: 8
## @param numNetworkThreads The number of threads handling network requests
##
numNetworkThreads: 3
## @param numPartitions The default number of log partitions per topic
##
numPartitions: 1
## @param numRecoveryThreadsPerDataDir The number of threads per data directory to be used for log recovery at startup and flushing at shutdown
##
numRecoveryThreadsPerDataDir: 1
## @param socketReceiveBufferBytes The receive buffer (SO_RCVBUF) used by the socket server
##
socketReceiveBufferBytes: 102400
## @param socketRequestMaxBytes The maximum size of a request that the socket server will accept (protection against OOM)
##
socketRequestMaxBytes: _104857600
## @param socketSendBufferBytes The send buffer (SO_SNDBUF) used by the socket server
##
socketSendBufferBytes: 102400
## @param zookeeperConnectionTimeoutMs Timeout in ms for connecting to ZooKeeper
##
zookeeperConnectionTimeoutMs: 6000
## @param zookeeperChrootPath Path which puts data under some path in the global ZooKeeper namespace
## ref: https://kafka.apache.org/documentation/#brokerconfigs_zookeeper.connect
##
zookeeperChrootPath: ""
## @param authorizerClassName The Authorizer is configured by setting authorizer.class.name=kafka.security.authorizer.AclAuthorizer in server.properties
##
authorizerClassName: ""
## @param allowEveryoneIfNoAclFound By default, if a resource has no associated ACLs, then no one is allowed to access that resource except super users
##
allowEveryoneIfNoAclFound: true
## @param superUsers You can add super users in server.properties
##
superUsers: User:admin
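## NOTE (tutorial): Kafka's underlying super.users property is semicolon-separated, so
## several super users can be listed, e.g. (the second user is hypothetical):
##       superUsers: User:admin;User:someuser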
## Authentication parameters
## https://github.com/bitnami/containers/tree/main/bitnami/kafka#security
##
auth:
  ## Authentication protocol for client and inter-broker communications
  ## This table shows the security provided on each protocol:
  ## | Method    | Authentication                | Encryption via TLS |
  ## | plaintext | None                          | No                 |
  ## | tls       | None                          | Yes                |
  ## | mtls      | Yes (two-way authentication)  | Yes                |
  ## | sasl      | Yes (via SASL)                | No                 |
  ## | sasl_tls  | Yes (via SASL)                | Yes                |
  ## @param auth.clientProtocol Authentication protocol for communications with clients. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls`
  ## @param auth.externalClientProtocol Authentication protocol for communications with external clients. Defaults to value of `auth.clientProtocol`. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls`
  ## @param auth.interBrokerProtocol Authentication protocol for inter-broker communications. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls`
  ##
  clientProtocol: plaintext
  # Note: empty by default for backwards compatibility reasons, find more information at
  # https://github.com/bitnami/charts/pull/8902/
  externalClientProtocol: ""
  interBrokerProtocol: plaintext
  ## SASL configuration
  ##
  sasl:
    ## @param auth.sasl.mechanisms SASL mechanisms when either `auth.interBrokerProtocol`, `auth.clientProtocol` or `auth.externalClientProtocol` are `sasl`. Allowed types: `plain`, `scram-sha-256`, `scram-sha-512`
    ##
    mechanisms: plain,scram-sha-256,scram-sha-512
    ## @param auth.sasl.interBrokerMechanism SASL mechanism for inter broker communication.
    ##
    interBrokerMechanism: plain
    ## JAAS configuration for SASL authentication.
    ##
    jaas:
      ## @param auth.sasl.jaas.clientUsers Kafka client user list
      ##
      ## clientUsers:
      ##   - user1
      ##   - user2
      ##
      clientUsers:
        - user
      ## @param auth.sasl.jaas.clientPasswords Kafka client passwords. This is mandatory if more than one user is specified in clientUsers
      ##
      ## clientPasswords:
      ##   - password1
      ##   - password2"
      ##
      clientPasswords: []
      ## @param auth.sasl.jaas.interBrokerUser Kafka inter broker communication user for SASL authentication
      ##
      interBrokerUser: admin
      ## @param auth.sasl.jaas.interBrokerPassword Kafka inter broker communication password for SASL authentication
      ##
      interBrokerPassword: ""
      ## @param auth.sasl.jaas.zookeeperUser Kafka ZooKeeper user for SASL authentication
      ##
      zookeeperUser: ""
      ## @param auth.sasl.jaas.zookeeperPassword Kafka ZooKeeper password for SASL authentication
      ##
      zookeeperPassword: ""
      ## @param auth.sasl.jaas.existingSecret Name of the existing secret containing credentials for clientUsers, interBrokerUser and zookeeperUser
      ## Create this secret running the command below where SECRET_NAME is the name of the secret you want to create:
      ##       kubectl create secret generic SECRET_NAME --from-literal=client-passwords=CLIENT_PASSWORD1,CLIENT_PASSWORD2 --from-literal=inter-broker-password=INTER_BROKER_PASSWORD --from-literal=zookeeper-password=ZOOKEEPER_PASSWORD
      ##
      existingSecret: ""
  ## TLS configuration
  ##
  tls:
    ## @param auth.tls.type Format to use for TLS certificates. Allowed types: `jks` and `pem`
    ##
    type: jks
    ## @param auth.tls.pemChainIncluded Flag to denote that the Certificate Authority (CA) certificates are bundled with the endpoint cert.
    ## Certificates must be in proper order, where the top certificate is the leaf and the bottom certificate is the top-most intermediate CA.
    ##
    pemChainIncluded: false
    ## @param auth.tls.existingSecrets Array existing secrets containing the TLS certificates for the Kafka brokers
    ## When using 'jks' format for certificates, each secret should contain a truststore and a keystore.
    ## Create these secrets following the steps below:
    ## 1) Generate your truststore and keystore files. Helpful script: https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh
    ## 2) Rename your truststore to `kafka.truststore.jks`.
    ## 3) Rename your keystores to `kafka-X.keystore.jks` where X is the ID of each Kafka broker.
    ## 4) Run the command below one time per broker to create its associated secret (SECRET_NAME_X is the name of the secret you want to create):
    ##       kubectl create secret generic SECRET_NAME_0 --from-file=kafka.truststore.jks=./kafka.truststore.jks --from-file=kafka.keystore.jks=./kafka-0.keystore.jks
    ##       kubectl create secret generic SECRET_NAME_1 --from-file=kafka.truststore.jks=./kafka.truststore.jks --from-file=kafka.keystore.jks=./kafka-1.keystore.jks
    ##       ...
    ##
    ## When using 'pem' format for certificates, each secret should contain a public CA certificate, a public certificate and one private key.
    ## Create these secrets following the steps below:
    ## 1) Create a certificate key and signing request per Kafka broker, and sign the signing request with your CA
    ## 2) Rename your CA file to `kafka.ca.crt`.
    ## 3) Rename your certificates to `kafka-X.tls.crt` where X is the ID of each Kafka broker.
    ## 4) Rename your keys to `kafka-X.tls.key` where X is the ID of each Kafka broker.
    ## 5) Run the command below one time per broker to create its associated secret (SECRET_NAME_X is the name of the secret you want to create):
    ##       kubectl create secret generic SECRET_NAME_0 --from-file=ca.crt=./kafka.ca.crt --from-file=tls.crt=./kafka-0.tls.crt --from-file=tls.key=./kafka-0.tls.key
    ##       kubectl create secret generic SECRET_NAME_1 --from-file=ca.crt=./kafka.ca.crt --from-file=tls.crt=./kafka-1.tls.crt --from-file=tls.key=./kafka-1.tls.key
    ##       ...
    ##
    existingSecrets: []
    ## @param auth.tls.autoGenerated Generate automatically self-signed TLS certificates for Kafka brokers. Currently only supported if `auth.tls.type` is `pem`
    ## Note: ignored when using 'jks' format or `auth.tls.existingSecrets` is not empty
    ##
    autoGenerated: false
    ## @param auth.tls.password Password to access the JKS files or PEM key when they are password-protected.
    ## Note: ignored when using 'existingSecret'.
    ##
    password: ""
    ## @param auth.tls.existingSecret Name of the secret containing the password to access the JKS files or PEM key when they are password-protected. (`key`: `password`)
    ##
    existingSecret: ""
    ## @param auth.tls.jksTruststoreSecret Name of the existing secret containing your truststore if truststore not existing or different from the ones in the `auth.tls.existingSecrets`
    ## Note: ignored when using 'pem' format for certificates.
    ##
    jksTruststoreSecret: ""
    ## @param auth.tls.jksKeystoreSAN The secret key from the `auth.tls.existingSecrets` containing the keystore with a SAN certificate
    ## The SAN certificate in it should be issued with Subject Alternative Names for all headless services:
    ##  - kafka-0.kafka-headless.kafka.svc.cluster.local
    ##  - kafka-1.kafka-headless.kafka.svc.cluster.local
    ##  - kafka-2.kafka-headless.kafka.svc.cluster.local
    ## Note: ignored when using 'pem' format for certificates.
    ##
    jksKeystoreSAN: ""
    ## @param auth.tls.jksTruststore The secret key from the `auth.tls.existingSecrets` or `auth.tls.jksTruststoreSecret` containing the truststore
    ## Note: ignored when using 'pem' format for certificates.
    ##
    jksTruststore: ""
    ## @param auth.tls.endpointIdentificationAlgorithm The endpoint identification algorithm to validate server hostname using server certificate
    ## Disable server host name verification by setting it to an empty string.
    ## ref: https://docs.confluent.io/current/kafka/authentication_ssl.html#optional-settings
    ##
    endpointIdentificationAlgorithm: https
  ## Zookeeper client configuration for kafka brokers
  ##
  zookeeper:
    ## TLS configuration
    ##
    tls:
      ## @param auth.zookeeper.tls.enabled Enable TLS for Zookeeper client connections.
      ##
      enabled: false
      ## @param auth.zookeeper.tls.type Format to use for TLS certificates. Allowed types: `jks` and `pem`.
      ##
      type: jks
      ## @param auth.zookeeper.tls.verifyHostname Hostname validation.
      ##
      verifyHostname: true
      ## @param auth.zookeeper.tls.existingSecret Name of the existing secret containing the TLS certificates for ZooKeeper client communications.
      ##
      existingSecret: ""
      ## @param auth.zookeeper.tls.existingSecretKeystoreKey The secret key from the  auth.zookeeper.tls.existingSecret containing the Keystore.
      ##
      existingSecretKeystoreKey: zookeeper.keystore.jks
      ## @param auth.zookeeper.tls.existingSecretTruststoreKey The secret key from the auth.zookeeper.tls.existingSecret containing the Truststore.
      ##
      existingSecretTruststoreKey: zookeeper.truststore.jks
      ## @param auth.zookeeper.tls.passwordsSecret Existing secret containing Keystore and Truststore passwords.
      ##
      passwordsSecret: ""
      ## @param auth.zookeeper.tls.passwordsSecretKeystoreKey The secret key from the auth.zookeeper.tls.passwordsSecret containing the password for the Keystore.
      ##
      passwordsSecretKeystoreKey: keystore-password
      ## @param auth.zookeeper.tls.passwordsSecretTruststoreKey The secret key from the auth.zookeeper.tls.passwordsSecret containing the password for the Truststore.
      ##
      passwordsSecretTruststoreKey: truststore-password
## @param listeners The address(es) the socket server listens on. Auto-calculated when it's set to an empty array
## When it's set to an empty array, the listeners will be configured
## based on the authentication protocols (auth.clientProtocol, auth.externalClientProtocol and auth.interBrokerProtocol parameters)
##
listeners: []
## @param advertisedListeners The address(es) (hostname:port) the broker will advertise to producers and consumers. Auto-calculated when it's set to an empty array
## When it's set to an empty array, the advertised listeners will be configured
## based on the authentication protocols (auth.clientProtocol, auth.externalClientProtocol and auth.interBrokerProtocol parameters)
##
advertisedListeners: []
## @param listenerSecurityProtocolMap The protocol->listener mapping. Auto-calculated when it's set to nil
## When it's nil, the listeners will be configured based on the authentication protocols (auth.clientProtocol, auth.externalClientProtocol and auth.interBrokerProtocol parameters)
##
listenerSecurityProtocolMap: ""
## @param allowPlaintextListener Allow to use the PLAINTEXT listener
##
allowPlaintextListener: true
## @param interBrokerListenerName The listener that the brokers should communicate on
##
interBrokerListenerName: INTERNAL
## @param command Override Kafka container command
##
command:
  - /scripts/setup.sh
## @param args Override Kafka container arguments
##
args: []
## @param extraEnvVars Extra environment variables to add to Kafka pods
## ref: https://github.com/bitnami/containers/tree/main/bitnami/kafka#configuration
## e.g:
## extraEnvVars:
##   - name: KAFKA_CFG_BACKGROUND_THREADS
##     value: "10"
##
extraEnvVars: []
## @param extraEnvVarsCM ConfigMap with extra environment variables
##
extraEnvVarsCM: ""
## @param extraEnvVarsSecret Secret with extra environment variables
##
extraEnvVarsSecret: ""

## @section Statefulset parameters

## @param replicaCount Number of Kafka nodes
##
replicaCount: 3
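## NOTE (tutorial): with replicaCount: 3, consider also raising the replication settings in the
## Kafka parameters section above (e.g. defaultReplicationFactor: 3, offsetsTopicReplicationFactor: 3,
## transactionStateLogReplicationFactor: 3) so topic data survives the loss of a single broker.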
## @param minBrokerId Minimal broker.id value, nodes increment their `broker.id` respectively
## Brokers increment their ID starting at this minimal value.
## E.g., with `minBrokerId=100` and 3 nodes, IDs will be 100, 101, 102 for brokers 0, 1, and 2, respectively.
##
minBrokerId: 0
## @param containerPorts.client Kafka client container port
## @param containerPorts.internal Kafka inter-broker container port
## @param containerPorts.external Kafka external container port
##
containerPorts:
  client: 9092
  internal: 9093
  external: 9094
## Configure extra options for Kafka containers' liveness, readiness and startup probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
## @param livenessProbe.enabled Enable livenessProbe on Kafka containers
## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
## @param livenessProbe.periodSeconds Period seconds for livenessProbe
## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
## @param livenessProbe.failureThreshold Failure threshold for livenessProbe
## @param livenessProbe.successThreshold Success threshold for livenessProbe
##
livenessProbe:
  enabled: true
  initialDelaySeconds: 10
  timeoutSeconds: 5
  failureThreshold: 3
  periodSeconds: 10
  successThreshold: 1
## @param readinessProbe.enabled Enable readinessProbe on Kafka containers
## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
## @param readinessProbe.periodSeconds Period seconds for readinessProbe
## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
## @param readinessProbe.failureThreshold Failure threshold for readinessProbe
## @param readinessProbe.successThreshold Success threshold for readinessProbe
##
readinessProbe:
  enabled: true
  initialDelaySeconds: 5
  failureThreshold: 6
  timeoutSeconds: 5
  periodSeconds: 10
  successThreshold: 1
## @param startupProbe.enabled Enable startupProbe on Kafka containers
## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
## @param startupProbe.periodSeconds Period seconds for startupProbe
## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe
## @param startupProbe.failureThreshold Failure threshold for startupProbe
## @param startupProbe.successThreshold Success threshold for startupProbe
##
startupProbe:
  enabled: false
  initialDelaySeconds: 30
  periodSeconds: 10
  timeoutSeconds: 1
  failureThreshold: 15
  successThreshold: 1
## @param customLivenessProbe Custom livenessProbe that overrides the default one
##
customLivenessProbe: {}
## @param customReadinessProbe Custom readinessProbe that overrides the default one
##
customReadinessProbe: {}
## @param customStartupProbe Custom startupProbe that overrides the default one
##
customStartupProbe: {}
## @param lifecycleHooks lifecycleHooks for the Kafka container to automate configuration before or after startup
##
lifecycleHooks: {}
## Kafka resource requests and limits
## ref: https://kubernetes.io/docs/user-guide/compute-resources/
## @param resources.limits The resources limits for the container
## @param resources.requests The requested resources for the container
##
resources:
  limits: {}
  requests: {}
## Kafka pods' Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## @param podSecurityContext.enabled Enable security context for the pods
## @param podSecurityContext.fsGroup Set Kafka pod's Security Context fsGroup
##
podSecurityContext:
  enabled: true
  fsGroup: 1001
## Kafka containers' Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
## @param containerSecurityContext.enabled Enable Kafka containers' Security Context
## @param containerSecurityContext.runAsUser Set Kafka containers' Security Context runAsUser
## @param containerSecurityContext.runAsNonRoot Set Kafka containers' Security Context runAsNonRoot
## e.g:
##   containerSecurityContext:
##     enabled: true
##     capabilities:
##       drop: ["NET_RAW"]
##     readOnlyRootFilesystem: true
##
containerSecurityContext:
  enabled: true
  runAsUser: 1001
  runAsNonRoot: true
## @param hostAliases Kafka pods host aliases
## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
##
hostAliases: []
## @param hostNetwork Specify if host network should be enabled for Kafka pods
##
hostNetwork: false
## @param hostIPC Specify if host IPC should be enabled for Kafka pods
##
hostIPC: false
## @param podLabels Extra labels for Kafka pods
## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
##
podLabels: {}
## @param podAnnotations Extra annotations for Kafka pods
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAffinityPreset: ""
## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAntiAffinityPreset: soft
## Node affinity preset
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
##
nodeAffinityPreset:
  ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
  ##
  type: ""
  ## @param nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set.
  ## E.g.
  ## key: "kubernetes.io/e2e-az-name"
  ##
  key: ""
  ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set.
  ## E.g.
  ## values:
  ##   - e2e-az1
  ##   - e2e-az2
  ##
  values: []
## @param affinity Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## Note: podAffinityPreset, podAntiAffinityPreset, and  nodeAffinityPreset will be ignored when it's set
##
affinity: {}
## @param nodeSelector Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## @param tolerations Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## @param topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
##
topologySpreadConstraints: []
## @param terminationGracePeriodSeconds Seconds the pod needs to gracefully terminate
## ref: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#hook-handler-execution
##
terminationGracePeriodSeconds: ""
## @param podManagementPolicy The StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel
## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy
##
podManagementPolicy: Parallel
## @param priorityClassName Name of the existing priority class to be used by kafka pods
## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
##
priorityClassName: ""
## @param schedulerName Name of the k8s scheduler (other than default)
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
schedulerName: ""
## @param updateStrategy.type Kafka statefulset strategy type
## @param updateStrategy.rollingUpdate Kafka statefulset rolling update configuration parameters
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
##
updateStrategy:
  type: RollingUpdate
  rollingUpdate: {}
## @param extraVolumes Optionally specify extra list of additional volumes for the Kafka pod(s)
## e.g:
## extraVolumes:
##   - name: kafka-jaas
##     secret:
##       secretName: kafka-jaas
##
extraVolumes: []
## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Kafka container(s)
## extraVolumeMounts:
##   - name: kafka-jaas
##     mountPath: /bitnami/kafka/config/kafka_jaas.conf
##     subPath: kafka_jaas.conf
##
extraVolumeMounts: []
## @param sidecars Add additional sidecar containers to the Kafka pod(s)
## e.g:
## sidecars:
##   - name: your-image-name
##     image: your-image
##     imagePullPolicy: Always
##     ports:
##       - name: portname
##         containerPort: 1234
##
sidecars: []
## @param initContainers Add additional init containers to the Kafka pod(s)
## e.g:
## initContainers:
##   - name: your-image-name
##     image: your-image
##     imagePullPolicy: Always
##     ports:
##       - name: portname
##         containerPort: 1234
##
initContainers: []
## Kafka Pod Disruption Budget
## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
## @param pdb.create Deploy a pdb object for the Kafka pod
## @param pdb.minAvailable Minimum number/percentage of available Kafka replicas
## @param pdb.maxUnavailable Maximum number/percentage of unavailable Kafka replicas
##
pdb:
  create: false
  minAvailable: ""
  maxUnavailable: 1
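## NOTE (tutorial): a sketch that would keep at least 2 of the 3 brokers up during voluntary
## disruptions, using the parameters above:
##       pdb:
##         create: true
##         maxUnavailable: 1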

## @section Traffic Exposure parameters

## Service parameters
##
service:
  ## @param service.type Kubernetes Service type
  ##
  type: ClusterIP
  ## @param service.ports.client Kafka svc port for client connections
  ## @param service.ports.internal Kafka svc port for inter-broker connections
  ## @param service.ports.external Kafka svc port for external connections
  ##
  ports:
    client: 9092
    internal: 9093
    external: 9094
  ## @param service.nodePorts.client Node port for the Kafka client connections
  ## @param service.nodePorts.external Node port for the Kafka external connections
  ## NOTE: choose port between <30000-32767>
  ##
  nodePorts:
    client: ""
    external: ""
  ## @param service.sessionAffinity Control where client requests go, to the same pod or round-robin
  ## Values: ClientIP or None
  ## ref: https://kubernetes.io/docs/user-guide/services/
  ##
  sessionAffinity: None
  ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity
  ## sessionAffinityConfig:
  ##   clientIP:
  ##     timeoutSeconds: 300
  ##
  sessionAffinityConfig: {}
  ## @param service.clusterIP Kafka service Cluster IP
  ## e.g.:
  ## clusterIP: None
  ##
  clusterIP: ""
  ## @param service.loadBalancerIP Kafka service Load Balancer IP
  ## ref: https://kubernetes.io/docs/user-guide/services/#type-loadbalancer
  ##
  loadBalancerIP: ""
  ## @param service.loadBalancerSourceRanges Kafka service Load Balancer sources
  ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
  ## e.g:
  ## loadBalancerSourceRanges:
  ##   - 10.10.10.0/24
  ##
  loadBalancerSourceRanges: []
  ## @param service.externalTrafficPolicy Kafka service external traffic policy
  ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
  ##
  externalTrafficPolicy: Cluster
  ## @param service.annotations Additional custom annotations for Kafka service
  ##
  annotations: {}
  ## Headless service properties
  ##
  headless:
    ## @param service.headless.annotations Annotations for the headless service.
    ##
    annotations: {}
    ## @param service.headless.labels Labels for the headless service.
    ##
    labels: {}
  ## @param service.extraPorts Extra ports to expose in the Kafka service (normally used with the `sidecar` value)
  ##
  extraPorts: []
## External Access to Kafka brokers configuration
##
externalAccess:
  ## @param externalAccess.enabled Enable Kubernetes external cluster access to Kafka brokers
  ##
  enabled: true
  ## External IPs auto-discovery configuration
  ## An init container is used to auto-detect LB IPs or node ports by querying the K8s API
  ## Note: RBAC might be required
  ##
  autoDiscovery:
    ## @param externalAccess.autoDiscovery.enabled Enable using an init container to auto-detect external IPs/ports by querying the K8s API
    ##
    enabled: true
    ## Bitnami Kubectl image
    ## ref: https://hub.docker.com/r/bitnami/kubectl/tags/
    ## @param externalAccess.autoDiscovery.image.registry Init container auto-discovery image registry
    ## @param externalAccess.autoDiscovery.image.repository Init container auto-discovery image repository
    ## @param externalAccess.autoDiscovery.image.tag Init container auto-discovery image tag (immutable tags are recommended)
    ## @param externalAccess.autoDiscovery.image.pullPolicy Init container auto-discovery image pull policy
    ## @param externalAccess.autoDiscovery.image.pullSecrets Init container auto-discovery image pull secrets
    ##
    image:
      registry: docker.io
      repository: bitnami/kubectl
      ## tag: 1.24.3-debian-11-r10
      tag: latest
      ## Specify an imagePullPolicy
      ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
      ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images
      ##
      pullPolicy: IfNotPresent
      ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace)
      ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
      ## e.g:
      ## pullSecrets:
      ##   - myRegistryKeySecretName
      ##
      pullSecrets: []
    ## Init Container resource requests and limits
    ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
    ## @param externalAccess.autoDiscovery.resources.limits The resources limits for the auto-discovery init container
    ## @param externalAccess.autoDiscovery.resources.requests The requested resources for the auto-discovery init container
    ##
    resources:
      limits: {}
      requests: {}
  ## Parameters to configure K8s service(s) used to externally access Kafka brokers
  ## Note: A new service per broker will be created
  ##
  service:
    ## @param externalAccess.service.type Kubernetes Service type for external access. It can be NodePort or LoadBalancer
    ##
    type: NodePort
    ## @param externalAccess.service.ports.external Kafka port used for external access when service type is LoadBalancer
    ##
    ports:
      external: 9094
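    ## NOTE (tutorial): with type NodePort and autoDiscovery enabled above, the chart creates one
    ## external Service per broker; after installing, the assigned node ports can be inspected with
    ## (the release name "kafka" is an assumption):
    ##       kubectl get svc -l app.kubernetes.io/instance=kafka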
## Network policies
## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
##
networkPolicy:
  ## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created
  ##
  enabled: false
  ## @param networkPolicy.allowExternal Don't require client label for connections
  ## When set to false, only pods with the correct client label will have network access to the port Kafka is
  ## listening on. When true, Kafka accepts connections from any source (with the correct destination port).
  ##
  allowExternal: true
  ## @param networkPolicy.explicitNamespacesSelector A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed
  ## If explicitNamespacesSelector is missing or set to {}, only client Pods that are in the NetworkPolicy's
  ## namespace, match the other criteria, and carry the required label can reach Kafka. To make Kafka
  ## reachable from clients in other namespaces, use this LabelSelector to select those namespaces; note
  ## that the NetworkPolicy's own namespace should also be explicitly added.
  ##
  ## e.g:
  ## explicitNamespacesSelector:
  ##   matchLabels:
  ##     role: frontend
  ##   matchExpressions:
  ##    - {key: role, operator: In, values: [frontend]}
  ##
  explicitNamespacesSelector: {}
  ## @param networkPolicy.externalAccess.from customize the from section for External Access on tcp-external port
  ## e.g:
  ## - ipBlock:
  ##    cidr: 172.9.0.0/16
  ##    except:
  ##    - 172.9.1.0/24
  ##
  externalAccess:
    from: []
  ## @param networkPolicy.egressRules.customRules [object] Custom network policy rule
  ##
  egressRules:
    ## Additional custom egress rules
    ## e.g:
    ## customRules:
    ##   - to:
    ##       - namespaceSelector:
    ##           matchLabels:
    ##             label: example
    customRules: []

## @section Persistence parameters

## Enable persistence using Persistent Volume Claims
## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
  ## @param persistence.enabled Enable Kafka data persistence using PVC, note that ZooKeeper persistence is unaffected
  ##
  enabled: true
  ## @param persistence.existingClaim A manually managed Persistent Volume and Claim
  ## If defined, PVC must be created manually before volume will be bound
  ## The value is evaluated as a template
  ##
  existingClaim: ""
  ## @param persistence.storageClass PVC Storage Class for Kafka data volume
  ## If defined, storageClassName: <storageClass>
  ## If set to "-", storageClassName: "", which disables dynamic provisioning
  ## If undefined (the default) or set to null, no storageClassName spec is
  ## set, choosing the default provisioner.
  ##
  storageClass: ""
  ## @param persistence.accessModes Persistent Volume Access Modes
  ##
  accessModes:
    - ReadWriteOnce
  ## @param persistence.size PVC Storage Request for Kafka data volume
  ##
  size: 8Gi
  ## @param persistence.annotations Annotations for the PVC
  ##
  annotations: {}
  ## @param persistence.selector Selector to match an existing Persistent Volume for Kafka data PVC. If set, the PVC can't have a PV dynamically provisioned for it
  ## selector:
  ##   matchLabels:
  ##     app: my-app
  ##
  selector: {}
  ## @param persistence.mountPath Mount path of the Kafka data volume
  ##
  mountPath: /bitnami/kafka
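  ## NOTE (tutorial): each broker gets its own PVC (data-<release>-kafka-N) provisioned through the
  ## global storageClass set at the top of this file; after installing, verify the claims bind with:
  ##       kubectl get pvc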
## Log Persistence parameters
##
logPersistence:
  ## @param logPersistence.enabled Enable Kafka logs persistence using PVC, note that ZooKeeper persistence is unaffected
  ##
  enabled: false
  ## @param logPersistence.existingClaim A manually managed Persistent Volume and Claim
  ## If defined, PVC must be created manually before volume will be bound
  ## The value is evaluated as a template
  ##
  existingClaim: ""
  ## @param logPersistence.storageClass PVC Storage Class for Kafka logs volume
  ## If defined, storageClassName: <storageClass>
  ## If set to "-", storageClassName: "", which disables dynamic provisioning
  ## If undefined (the default) or set to null, no storageClassName spec is
  ## set, choosing the default provisioner.
  ##
  storageClass: ""
  ## @param logPersistence.accessModes Persistent Volume Access Modes
  ##
  accessModes:
    - ReadWriteOnce
  ## @param logPersistence.size PVC Storage Request for Kafka logs volume
  ##
  size: 8Gi
  ## @param logPersistence.annotations Annotations for the PVC
  ##
  annotations: {}
  ## @param logPersistence.selector Selector to match an existing Persistent Volume for Kafka log data PVC. If set, the PVC can't have a PV dynamically provisioned for it
  ## selector:
  ##   matchLabels:
  ##     app: my-app
  ##
  selector: {}
  ## @param logPersistence.mountPath Mount path of the Kafka logs volume
  ##
  mountPath: /opt/bitnami/kafka/logs

## @section Volume Permissions parameters
##

## Init containers parameters:
## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node
##
volumePermissions:
  ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume
  ##
  enabled: false
  ## @param volumePermissions.image.registry Init container volume-permissions image registry
  ## @param volumePermissions.image.repository Init container volume-permissions image repository
  ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended)
  ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy
  ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets
  ##
  image:
    registry: docker.io
    repository: bitnami/bitnami-shell
    tag: latest
    pullPolicy: IfNotPresent
    ## Optionally specify an array of imagePullSecrets.
    ## Secrets must be manually created in the namespace.
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
    ## Example:
    ## pullSecrets:
    ##   - myRegistryKeySecretName
    ##
    pullSecrets: []
  ## Init container resource requests and limits
  ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
  ## @param volumePermissions.resources.limits Init container volume-permissions resource limits
  ## @param volumePermissions.resources.requests Init container volume-permissions resource requests
  ##
  resources:
    limits: {}
    requests: {}
  ## Init container's Security Context
  ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser
  ## and not the below volumePermissions.containerSecurityContext.runAsUser
  ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container
  ##
  containerSecurityContext:
    runAsUser: 0

## @section Other Parameters

## ServiceAccount for Kafka
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
##
serviceAccount:
  ## @param serviceAccount.create Enable creation of ServiceAccount for Kafka pods
  ##
  create: true
  ## @param serviceAccount.name The name of the service account to use. If not set and `create` is `true`, a name is generated
  ## If not set and create is true, a name is generated using the kafka.serviceAccountName template
  ##
  name: ""
  ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created
  ## Can be set to false if pods using this serviceAccount do not need to use K8s API
  ##
  automountServiceAccountToken: true
  ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount
  ##
  annotations: {}
## Role Based Access Control
## ref: https://kubernetes.io/docs/admin/authorization/rbac/
##
rbac:
  ## @param rbac.create Whether to create & use RBAC resources or not
  ## binding Kafka ServiceAccount to a role
  ## that allows Kafka pods querying the K8s API
  ##
  create: true
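  ## NOTE (tutorial): keep this enabled here; the auto-discovery init container enabled under
  ## `externalAccess.autoDiscovery` queries the Kubernetes API and depends on these RBAC
  ## resources (the chart's validation rejects enabling autoDiscovery without them).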

## @section Metrics parameters

## Prometheus Exporters / Metrics
##
metrics:
  ## Prometheus Kafka exporter: exposes complementary metrics to JMX exporter
  ##
  kafka:
    ## @param metrics.kafka.enabled Whether or not to create a standalone Kafka exporter to expose Kafka metrics
    ##
    enabled: false
    ## Bitnami Kafka exporter image
    ## ref: https://hub.docker.com/r/bitnami/kafka-exporter/tags/
    ## @param metrics.kafka.image.registry Kafka exporter image registry
    ## @param metrics.kafka.image.repository Kafka exporter image repository
    ## @param metrics.kafka.image.tag Kafka exporter image tag (immutable tags are recommended)
    ## @param metrics.kafka.image.pullPolicy Kafka exporter image pull policy
    ## @param metrics.kafka.image.pullSecrets Specify docker-registry secret names as an array
    ##
    image:
      registry: docker.io
      repository: bitnami/kafka-exporter
      tag: latest
      ## Specify an imagePullPolicy
      ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
      ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images
      ##
      pullPolicy: IfNotPresent
      ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace)
      ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
      ## e.g:
      ## pullSecrets:
      ##   - myRegistryKeySecretName
      ##
      pullSecrets: []

    ## @param metrics.kafka.certificatesSecret Name of the existing secret containing the optional certificate and key files
    ## for Kafka exporter client authentication
    ##
    certificatesSecret: ""
    ## @param metrics.kafka.tlsCert The secret key from the certificatesSecret if 'client-cert' key different from the default (cert-file)
    ##
    tlsCert: cert-file
    ## @param metrics.kafka.tlsKey The secret key from the certificatesSecret if 'client-key' key different from the default (key-file)
    ##
    tlsKey: key-file
    ## @param metrics.kafka.tlsCaSecret Name of the existing secret containing the optional ca certificate for Kafka exporter client authentication
    ##
    tlsCaSecret: ""
    ## @param metrics.kafka.tlsCaCert The secret key from the certificatesSecret or tlsCaSecret if 'ca-cert' key different from the default (ca-file)
    ##
    tlsCaCert: ca-file
    ## @param metrics.kafka.extraFlags Extra flags to be passed to Kafka exporter
    ## e.g:
    ## extraFlags:
    ##   tls.insecure-skip-tls-verify: ""
    ##   web.telemetry-path: "/metrics"
    ##
    extraFlags: {}
    ## @param metrics.kafka.command Override Kafka exporter container command
    ##
    command: []
    ## @param metrics.kafka.args Override Kafka exporter container arguments
    ##
    args: []
    ## @param metrics.kafka.containerPorts.metrics Kafka exporter metrics container port
    ##
    containerPorts:
      metrics: 9308
    ## Kafka exporter resource requests and limits
    ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
    ## @param metrics.kafka.resources.limits The resources limits for the container
    ## @param metrics.kafka.resources.requests The requested resources for the container
    ##
    resources:
      limits: {}
      requests: {}
    ## Kafka exporter pods' Security Context
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
    ## @param metrics.kafka.podSecurityContext.enabled Enable security context for the pods
    ## @param metrics.kafka.podSecurityContext.fsGroup Set Kafka exporter pod's Security Context fsGroup
    ##
    podSecurityContext:
      enabled: true
      fsGroup: 1001
    ## Kafka exporter containers' Security Context
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
    ## @param metrics.kafka.containerSecurityContext.enabled Enable Kafka exporter containers' Security Context
    ## @param metrics.kafka.containerSecurityContext.runAsUser Set Kafka exporter containers' Security Context runAsUser
    ## @param metrics.kafka.containerSecurityContext.runAsNonRoot Set Kafka exporter containers' Security Context runAsNonRoot
    ## e.g:
    ##   containerSecurityContext:
    ##     enabled: true
    ##     capabilities:
    ##       drop: ["NET_RAW"]
    ##     readOnlyRootFilesystem: true
    ##
    containerSecurityContext:
      enabled: true
      runAsUser: 1001
      runAsNonRoot: true
    ## @param metrics.kafka.hostAliases Kafka exporter pods host aliases
    ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
    ##
    hostAliases: []
    ## @param metrics.kafka.podLabels Extra labels for Kafka exporter pods
    ## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
    ##
    podLabels: {}
    ## @param metrics.kafka.podAnnotations Extra annotations for Kafka exporter pods
    ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
    ##
    podAnnotations: {}
    ## @param metrics.kafka.podAffinityPreset Pod affinity preset. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard`
    ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
    ##
    podAffinityPreset: ""
    ## @param metrics.kafka.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard`
    ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
    ##
    podAntiAffinityPreset: soft
    ## Node affinity preset
    ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
    ##
    nodeAffinityPreset:
      ## @param metrics.kafka.nodeAffinityPreset.type Node affinity preset type. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard`
      ##
      type: ""
      ## @param metrics.kafka.nodeAffinityPreset.key Node label key to match. Ignored if `metrics.kafka.affinity` is set.
      ## E.g.
      ## key: "kubernetes.io/e2e-az-name"
      ##
      key: ""
      ## @param metrics.kafka.nodeAffinityPreset.values Node label values to match. Ignored if `metrics.kafka.affinity` is set.
      ## E.g.
      ## values:
      ##   - e2e-az1
      ##   - e2e-az2
      ##
      values: []
    ## @param metrics.kafka.affinity Affinity for pod assignment
    ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
    ## Note: metrics.kafka.podAffinityPreset, metrics.kafka.podAntiAffinityPreset, and metrics.kafka.nodeAffinityPreset will be ignored when it's set
    ##
    affinity: {}
    ## @param metrics.kafka.nodeSelector Node labels for pod assignment
    ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
    ##
    nodeSelector: {}
    ## @param metrics.kafka.tolerations Tolerations for pod assignment
    ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
    ##
    tolerations: []
    ## @param metrics.kafka.schedulerName Name of the k8s scheduler (other than default) for Kafka exporter
    ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
    ##
    schedulerName: ""
    ## @param metrics.kafka.priorityClassName Kafka exporter pods' priorityClassName
    ##
    priorityClassName: ""
    ## @param metrics.kafka.topologySpreadConstraints Topology Spread Constraints for pod assignment
    ## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
    ## The value is evaluated as a template
    ##
    topologySpreadConstraints: []
    ## @param metrics.kafka.extraVolumes Optionally specify extra list of additional volumes for the Kafka exporter pod(s)
    ## e.g:
    ## extraVolumes:
    ##   - name: kafka-jaas
    ##     secret:
    ##       secretName: kafka-jaas
    ##
    extraVolumes: []
    ## @param metrics.kafka.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Kafka exporter container(s)
    ## extraVolumeMounts:
    ##   - name: kafka-jaas
    ##     mountPath: /bitnami/kafka/config/kafka_jaas.conf
    ##     subPath: kafka_jaas.conf
    ##
    extraVolumeMounts: []
    ## @param metrics.kafka.sidecars Add additional sidecar containers to the Kafka exporter pod(s)
    ## e.g:
    ## sidecars:
    ##   - name: your-image-name
    ##     image: your-image
    ##     imagePullPolicy: Always
    ##     ports:
    ##       - name: portname
    ##         containerPort: 1234
    ##
    sidecars: []
    ## @param metrics.kafka.initContainers Add init containers to the Kafka exporter pods
    ## e.g:
    ## initContainers:
    ##   - name: your-image-name
    ##     image: your-image
    ##     imagePullPolicy: Always
    ##     ports:
    ##       - name: portname
    ##         containerPort: 1234
    ##
    initContainers: []
    ## Kafka exporter service configuration
    ##
    service:
      ## @param metrics.kafka.service.ports.metrics Kafka exporter metrics service port
      ##
      ports:
        metrics: 9308
      ## @param metrics.kafka.service.clusterIP Static clusterIP or None for headless services
      ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address
      ##
      clusterIP: ""
      ## @param metrics.kafka.service.sessionAffinity Control where client requests go, to the same pod or round-robin
      ## Values: ClientIP or None
      ## ref: https://kubernetes.io/docs/user-guide/services/
      ##
      sessionAffinity: None
      ## @param metrics.kafka.service.annotations [object] Annotations for the Kafka exporter service
      ##
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "{{ .Values.metrics.kafka.service.ports.metrics }}"
        prometheus.io/path: "/metrics"
    ## Kafka exporter pods ServiceAccount
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
    ##
    serviceAccount:
      ## @param metrics.kafka.serviceAccount.create Enable creation of ServiceAccount for Kafka exporter pods
      ##
      create: true
      ## @param metrics.kafka.serviceAccount.name The name of the service account to use. If not set and `create` is `true`, a name is generated
      ## If not set and create is true, a name is generated using the kafka.metrics.kafka.serviceAccountName template
      ##
      name: ""
      ## @param metrics.kafka.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created
      ## Can be set to false if pods using this serviceAccount do not need to use K8s API
      ##
      automountServiceAccountToken: true
  ## Prometheus JMX exporter: exposes the majority of Kafka's metrics
  ##
  jmx:
    ## @param metrics.jmx.enabled Whether or not to expose JMX metrics to Prometheus
    ##
    enabled: false
    ## Bitnami JMX exporter image
    ## ref: https://hub.docker.com/r/bitnami/jmx-exporter/tags/
    ## @param metrics.jmx.image.registry JMX exporter image registry
    ## @param metrics.jmx.image.repository JMX exporter image repository
    ## @param metrics.jmx.image.tag JMX exporter image tag (immutable tags are recommended)
    ## @param metrics.jmx.image.pullPolicy JMX exporter image pull policy
    ## @param metrics.jmx.image.pullSecrets Specify docker-registry secret names as an array
    ##
    image:
      registry: docker.io
      repository: bitnami/jmx-exporter
      tag: latest
      ## Specify an imagePullPolicy
      ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
      ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images
      ##
      pullPolicy: IfNotPresent
      ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace)
      ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
      ## e.g:
      ## pullSecrets:
      ##   - myRegistryKeySecretName
      ##
      pullSecrets: []
    ## Prometheus JMX exporter containers' Security Context
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
    ## @param metrics.jmx.containerSecurityContext.enabled Enable Prometheus JMX exporter containers' Security Context
    ## @param metrics.jmx.containerSecurityContext.runAsUser Set Prometheus JMX exporter containers' Security Context runAsUser
    ## @param metrics.jmx.containerSecurityContext.runAsNonRoot Set Prometheus JMX exporter containers' Security Context runAsNonRoot
    ## e.g:
    ##   containerSecurityContext:
    ##     enabled: true
    ##     capabilities:
    ##       drop: ["NET_RAW"]
    ##     readOnlyRootFilesystem: true
    ##
    containerSecurityContext:
      enabled: true
      runAsUser: 1001
      runAsNonRoot: true
    ## @param metrics.jmx.containerPorts.metrics Prometheus JMX exporter metrics container port
    ##
    containerPorts:
      metrics: 5556
    ## Prometheus JMX exporter resource requests and limits
    ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
    ## @param metrics.jmx.resources.limits The resources limits for the JMX exporter container
    ## @param metrics.jmx.resources.requests The requested resources for the JMX exporter container
    ##
    resources:
      limits: {}
      requests: {}
    ## Prometheus JMX exporter service configuration
    ##
    service:
      ## @param metrics.jmx.service.ports.metrics Prometheus JMX exporter metrics service port
      ##
      ports:
        metrics: 5556
      ## @param metrics.jmx.service.clusterIP Static clusterIP or None for headless services
      ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address
      ##
      clusterIP: ""
      ## @param metrics.jmx.service.sessionAffinity Control where client requests go, to the same pod or round-robin
      ## Values: ClientIP or None
      ## ref: https://kubernetes.io/docs/user-guide/services/
      ##
      sessionAffinity: None
      ## @param metrics.jmx.service.annotations [object] Annotations for the Prometheus JMX exporter service
      ##
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "{{ .Values.metrics.jmx.service.ports.metrics }}"
        prometheus.io/path: "/"
    ## @param metrics.jmx.whitelistObjectNames Allows setting which JMX objects to expose via the JMX exporter
    ## Only whitelisted values will be exposed via the JMX exporter. They must also be exposed via Rules. To expose all metrics
    ## (warning: this is extremely verbose and the metrics are not formatted in a Prometheus style), (1) set `whitelistObjectNames: []`
    ## and (2) comment out `overrideConfig` above.
    ##
    whitelistObjectNames:
      - kafka.controller:*
      - kafka.server:*
      - java.lang:*
      - kafka.network:*
      - kafka.log:*
    ## @param metrics.jmx.config [string] Configuration file for JMX exporter
    ## Specify content for jmx-kafka-prometheus.yml. Evaluated as a template
    ##
    ## Credits to the incubator/kafka chart for the JMX configuration.
    ## https://github.com/helm/charts/tree/master/incubator/kafka
    ##
    config: |-
      jmxUrl: service:jmx:rmi:///jndi/rmi://127.0.0.1:5555/jmxrmi
      lowercaseOutputName: true
      lowercaseOutputLabelNames: true
      ssl: false
      {{- if .Values.metrics.jmx.whitelistObjectNames }}
      whitelistObjectNames: ["{{ join "\",\"" .Values.metrics.jmx.whitelistObjectNames }}"]
      {{- end }}
    ## @param metrics.jmx.existingConfigmap Name of existing ConfigMap with JMX exporter configuration
    ## NOTE: This will override metrics.jmx.config
    ##
    existingConfigmap: ""
  ## Prometheus Operator ServiceMonitor configuration
  ##
  serviceMonitor:
    ## @param metrics.serviceMonitor.enabled if `true`, creates a Prometheus Operator ServiceMonitor (requires `metrics.kafka.enabled` or `metrics.jmx.enabled` to be `true`)
    ##
    enabled: false
    ## @param metrics.serviceMonitor.namespace Namespace in which Prometheus is running
    ##
    namespace: ""
    ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped
    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
    ##
    interval: ""
    ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
    ##
    scrapeTimeout: ""
    ## @param metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus
    ##
    labels: {}
    ## @param metrics.serviceMonitor.selector Prometheus instance selector labels
    ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration
    ##
    selector: {}
    ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping
    ##
    relabelings: []
    ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion
    ##
    metricRelabelings: []
    ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint
    ##
    honorLabels: false
    ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus.
    ##
    jobLabel: ""
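  ## For reference, a minimal sketch that turns on the JMX exporter together with a
  ## ServiceMonitor (the namespace and interval below are assumptions, adjust them to
  ## your Prometheus Operator setup):
  ##   metrics:
  ##     jmx:
  ##       enabled: true
  ##     serviceMonitor:
  ##       enabled: true
  ##       namespace: monitoring
  ##       interval: 30s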

## @section Kafka provisioning parameters

## Kafka provisioning
##
provisioning:
  ## @param provisioning.enabled Enable kafka provisioning Job
  ##
  enabled: false
  ## @param provisioning.numPartitions Default number of partitions for topics when unspecified
  ##
  numPartitions: 1
  ## @param provisioning.replicationFactor Default replication factor for topics when unspecified
  ##
  replicationFactor: 1
  ## @param provisioning.topics Kafka topics to provision
  ## - name: topic-name
  ##   partitions: 1
  ##   replicationFactor: 1
  ##   ## https://kafka.apache.org/documentation/#topicconfigs
  ##   config:
  ##     max.message.bytes: 64000
  ##     flush.messages: 1
  ##
  topics: []
  ## @param provisioning.tolerations Tolerations for pod assignment
  ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
  ##
  tolerations: []
  ## @param provisioning.extraProvisioningCommands Extra commands to run to provision cluster resources
  ## - echo "Allow user to consume from any topic"
  ## - >-
  ##   /opt/bitnami/kafka/bin/kafka-acls.sh
  ##   --bootstrap-server $KAFKA_SERVICE
  ##   --command-config $CLIENT_CONF
  ##   --add
  ##   --allow-principal User:user
  ##   --consumer --topic '*'
  ## - "/opt/bitnami/kafka/bin/kafka-acls.sh
  ##      --bootstrap-server $KAFKA_SERVICE
  ##      --command-config $CLIENT_CONF
  ##      --list"
  ##
  extraProvisioningCommands: []
  ## @param provisioning.parallel Number of provisioning commands to run at the same time
  ##
  parallel: 1
  ## @param provisioning.preScript Extra bash script to run before topic provisioning. $CLIENT_CONF is path to properties file with most needed configurations
  ##
  preScript: ""
  ## @param provisioning.postScript Extra bash script to run after topic provisioning. $CLIENT_CONF is path to properties file with most needed configurations
  ##
  postScript: ""
  ## Auth Configuration for kafka provisioning Job
  ##
  auth:
    ## TLS configuration for kafka provisioning Job
    ##
    tls:
      ## @param provisioning.auth.tls.type Format to use for TLS certificates. Allowed types: `jks` and `pem`.
      ## Note: ignored if auth.tls.clientProtocol is different from one of these values: "tls", "mtls", "sasl_tls".
      ##
      type: jks
      ## @param provisioning.auth.tls.certificatesSecret Existing secret containing the TLS certificates for the Kafka provisioning Job.
      ## When using 'jks' format for certificates, the secret should contain a truststore and a keystore.
      ## When using 'pem' format for certificates, the secret should contain a public CA certificate, a public certificate and one private key.
      ##
      certificatesSecret: ""
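      ## For illustration, a secret holding the expected JKS files could be created like
      ## this (hypothetical secret and file names, a sketch only):
      ##   kubectl create secret generic kafka-provisioning-certs \
      ##     --from-file=keystore.jks=./keystore.jks \
      ##     --from-file=truststore.jks=./truststore.jks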
      ## @param provisioning.auth.tls.cert The secret key from the certificatesSecret if 'cert' key different from the default (tls.crt)
      ##
      cert: tls.crt
      ## @param provisioning.auth.tls.key The secret key from the certificatesSecret if 'key' key different from the default (tls.key)
      ##
      key: tls.key
      ## @param provisioning.auth.tls.caCert The secret key from the certificatesSecret if 'caCert' key different from the default (ca.crt)
      ##
      caCert: ca.crt
      ## @param provisioning.auth.tls.keystore The secret key from the certificatesSecret if 'keystore' key different from the default (keystore.jks)
      ##
      keystore: keystore.jks
      ## @param provisioning.auth.tls.truststore The secret key from the certificatesSecret if 'truststore' key different from the default (truststore.jks)
      ##
      truststore: truststore.jks
      ## @param provisioning.auth.tls.passwordsSecret Name of the secret containing passwords to access the JKS files or PEM key when they are password-protected.
      ## It should contain two keys called "keystore-password" and "truststore-password", or "key-password" if using a password-protected PEM key.
      ##
      passwordsSecret: ""
      ## @param provisioning.auth.tls.keyPasswordSecretKey The secret key from the passwordsSecret if 'keyPasswordSecretKey' key different from the default (key-password)
      ## Note: must not be used if `passwordsSecret` is not defined.
      ##
      keyPasswordSecretKey: key-password
      ## @param provisioning.auth.tls.keystorePasswordSecretKey The secret key from the passwordsSecret if 'keystorePasswordSecretKey' key different from the default (keystore-password)
      ## Note: must not be used if `passwordsSecret` is not defined.
      ##
      keystorePasswordSecretKey: keystore-password
      ## @param provisioning.auth.tls.truststorePasswordSecretKey The secret key from the passwordsSecret if 'truststorePasswordSecretKey' key different from the default (truststore-password)
      ## Note: must not be used if `passwordsSecret` is not defined.
      ##
      truststorePasswordSecretKey: truststore-password
      ## @param provisioning.auth.tls.keyPassword Password to access the password-protected PEM key if necessary. Ignored if 'passwordsSecret' is provided.
      ##
      keyPassword: ""
      ## @param provisioning.auth.tls.keystorePassword Password to access the JKS keystore. Ignored if 'passwordsSecret' is provided.
      ##
      keystorePassword: ""
      ## @param provisioning.auth.tls.truststorePassword Password to access the JKS truststore. Ignored if 'passwordsSecret' is provided.
      ##
      truststorePassword: ""
  ## @param provisioning.command Override provisioning container command
  ##
  command: []
  ## @param provisioning.args Override provisioning container arguments
  ##
  args: []
  ## @param provisioning.extraEnvVars Extra environment variables to add to the provisioning pod
  ## e.g:
  ## extraEnvVars:
  ##   - name: KAFKA_CFG_BACKGROUND_THREADS
  ##     value: "10"
  ##
  extraEnvVars: []
  ## @param provisioning.extraEnvVarsCM ConfigMap with extra environment variables
  ##
  extraEnvVarsCM: ""
  ## @param provisioning.extraEnvVarsSecret Secret with extra environment variables
  ##
  extraEnvVarsSecret: ""
  ## @param provisioning.podAnnotations Extra annotations for Kafka provisioning pods
  ##
  podAnnotations: {}
  ## @param provisioning.podLabels Extra labels for Kafka provisioning pods
  ## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
  ##
  podLabels: {}
  ## Kafka provisioning resource requests and limits
  ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
  ## @param provisioning.resources.limits The resources limits for the Kafka provisioning container
  ## @param provisioning.resources.requests The requested resources for the Kafka provisioning container
  ##
  resources:
    limits: {}
    requests: {}
  ## Kafka provisioning pods' Security Context
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
  ## @param provisioning.podSecurityContext.enabled Enable security context for the pods
  ## @param provisioning.podSecurityContext.fsGroup Set Kafka provisioning pod's Security Context fsGroup
  ##
  podSecurityContext:
    enabled: true
    fsGroup: 1001
  ## Kafka provisioning containers' Security Context
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
  ## @param provisioning.containerSecurityContext.enabled Enable Kafka provisioning containers' Security Context
  ## @param provisioning.containerSecurityContext.runAsUser Set Kafka provisioning containers' Security Context runAsUser
  ## @param provisioning.containerSecurityContext.runAsNonRoot Set Kafka provisioning containers' Security Context runAsNonRoot
  ## e.g:
  ##   containerSecurityContext:
  ##     enabled: true
  ##     capabilities:
  ##       drop: ["NET_RAW"]
  ##     readOnlyRootFilesystem: true
  ##
  containerSecurityContext:
    enabled: true
    runAsUser: 1001
    runAsNonRoot: true
  ## @param provisioning.schedulerName Name of the k8s scheduler (other than default) for kafka provisioning
  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
  ##
  schedulerName: ""
  ## @param provisioning.extraVolumes Optionally specify extra list of additional volumes for the Kafka provisioning pod(s)
  ## e.g:
  ## extraVolumes:
  ##   - name: kafka-jaas
  ##     secret:
  ##       secretName: kafka-jaas
  ##
  extraVolumes: []
  ## @param provisioning.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Kafka provisioning container(s)
  ## extraVolumeMounts:
  ##   - name: kafka-jaas
  ##     mountPath: /bitnami/kafka/config/kafka_jaas.conf
  ##     subPath: kafka_jaas.conf
  ##
  extraVolumeMounts: []
  ## @param provisioning.sidecars Add additional sidecar containers to the Kafka provisioning pod(s)
  ## e.g:
  ## sidecars:
  ##   - name: your-image-name
  ##     image: your-image
  ##     imagePullPolicy: Always
  ##     ports:
  ##       - name: portname
  ##         containerPort: 1234
  ##
  sidecars: []
  ## @param provisioning.initContainers Add additional init containers to the Kafka provisioning pod(s)
  ## e.g:
  ## initContainers:
  ##   - name: your-image-name
  ##     image: your-image
  ##     imagePullPolicy: Always
  ##     ports:
  ##       - name: portname
  ##         containerPort: 1234
  ##
  initContainers: []
  ## @param provisioning.waitForKafka If true use an init container to wait until kafka is ready before starting provisioning
  ##
  waitForKafka: true
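  ## For reference, a minimal sketch that provisions a single topic at install time
  ## (the topic name and counts below are assumptions):
  ##   provisioning:
  ##     enabled: true
  ##     topics:
  ##       - name: my-topic
  ##         partitions: 3
  ##         replicationFactor: 3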

## @section ZooKeeper chart parameters

## ZooKeeper chart configuration
## https://github.com/bitnami/charts/blob/master/bitnami/zookeeper/values.yaml
##
zookeeper:
  ## @param zookeeper.enabled Switch to enable or disable the ZooKeeper helm chart
  ##
  enabled: true
  ## @param zookeeper.replicaCount Number of ZooKeeper nodes
  ##
  replicaCount: 1
  ## ZooKeeper authentication
  ##
  auth:
    client:
      ## @param zookeeper.auth.client.enabled Enable ZooKeeper auth
      ##
      enabled: false
      ## @param zookeeper.auth.client.clientUser User that ZooKeeper clients will use to authenticate
      ##
      clientUser: ""
      ## @param zookeeper.auth.client.clientPassword Password that ZooKeeper clients will use to authenticate
      ##
      clientPassword: ""
      ## @param zookeeper.auth.client.serverUsers Comma, semicolon or whitespace separated list of users to be created. Specify them as a string, for example: "user1,user2,admin"
      ##
      serverUsers: ""
      ## @param zookeeper.auth.client.serverPasswords Comma, semicolon or whitespace separated list of passwords to assign to users when created. Specify them as a string, for example: "pass4user1, pass4user2, pass4admin"
      ##
      serverPasswords: ""
  ## ZooKeeper Persistence parameters
  ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/
  ## @param zookeeper.persistence.enabled Enable persistence on ZooKeeper using PVC(s)
  ## @param zookeeper.persistence.storageClass Persistent Volume storage class
  ## @param zookeeper.persistence.accessModes Persistent Volume access modes
  ## @param zookeeper.persistence.size Persistent Volume size
  ##
  persistence:
    enabled: true
    storageClass: ""
    accessModes:
      - ReadWriteOnce
    size: 8Gi

## External Zookeeper Configuration
## All of these values are only used if `zookeeper.enabled=false`
##
externalZookeeper:
  ## @param externalZookeeper.servers List of external zookeeper servers to use. Typically used in combination with 'zookeeperChrootPath'.
  ##
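  ## e.g. (hypothetical addresses, a sketch only):
  ## servers:
  ##   - zookeeper-0.example.com:2181
  ##   - zookeeper-1.example.com:2181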
  servers: []

5. Example parameter configuration:

global:
  storageClass: "managed-nfs-storage"  # replace with your own NFS dynamic StorageClass
replicaCount: 3
externalAccess:
  enabled: true
  service:
    type: NodePort
  autoDiscovery:
    enabled: true
serviceAccount:
  create: true
rbac:
  create: true

storageClass: use NFS dynamic storage
replicaCount: a 3-node cluster
externalAccess.enabled: enable access to the Kafka brokers from outside the cluster
externalAccess.service.type: expose the brokers through NodePort services
externalAccess.autoDiscovery.enabled: use an init container to auto-detect the external IP/port of each broker
serviceAccount.create: create a ServiceAccount for the Kafka pods
rbac.create: create and use RBAC resources
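
The same overrides can also be passed with --set flags instead of editing values.yaml; a minimal sketch (the storage class name is a placeholder, use your own):

helm install kafka bitnami/kafka \
  --set global.storageClass=managed-nfs-storage \
  --set replicaCount=3 \
  --set externalAccess.enabled=true \
  --set externalAccess.service.type=NodePort \
  --set externalAccess.autoDiscovery.enabled=true \
  --set serviceAccount.create=true \
  --set rbac.create=true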

6. From the directory containing values.yaml, install the chart:

helm install kafka bitnami/kafka -f values.yaml
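
To watch the release come up before checking the full resource list, something like the following can be used (the label selector is the standard one applied by Bitnami charts; a sketch):

helm status kafka
kubectl get pods -w -l app.kubernetes.io/name=kafka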

7. Check the results:

[root@master kafka]# kubectl get all
NAME                                          READY   STATUS    RESTARTS   AGE
pod/kafka-0                                   1/1     Running   0          18m
pod/kafka-1                                   1/1     Running   0          18m
pod/kafka-2                                   1/1     Running   0          18m
pod/kafka-zookeeper-0                         1/1     Running   0          18m
pod/nfs-client-provisioner-7ccbb57c55-96m57   1/1     Running   0          21m

NAME                               TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                      AGE
service/kafka                      ClusterIP   10.101.68.123   <none>        9092/TCP                     18m
service/kafka-0-external           NodePort    10.99.134.253   <none>        9094:32743/TCP               18m
service/kafka-1-external           NodePort    10.102.158.7    <none>        9094:32320/TCP               18m
service/kafka-2-external           NodePort    10.96.32.52     <none>        9094:30192/TCP               18m
service/kafka-headless             ClusterIP   None            <none>        9092/TCP,9093/TCP            19m
service/kafka-zookeeper            ClusterIP   10.106.139.0    <none>        2181/TCP,2888/TCP,3888/TCP   18m
service/kafka-zookeeper-headless   ClusterIP   None            <none>        2181/TCP,2888/TCP,3888/TCP   19m
service/kubernetes                 ClusterIP   10.96.0.1       <none>        443/TCP                      29d

NAME                                     READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/nfs-client-provisioner   1/1     1            1           21m

NAME                                                DESIRED   CURRENT   READY   AGE
replicaset.apps/nfs-client-provisioner-7ccbb57c55   1         1         1       21m

NAME                               READY   AGE
statefulset.apps/kafka             3/3     18m
statefulset.apps/kafka-zookeeper   1/1     18m
[root@master kafka]# 

8. Client code can connect through any of the three NodePort endpoints; connecting and producing messages has been tested successfully. A sketch follows.
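
For example, with the Kafka CLI tools (from a local Kafka download or the Bitnami image), a message can be produced and consumed through one of the NodePorts. The node IP 192.168.1.100 and the topic name test below are placeholders; port 32743 is the one shown for kafka-0-external above:

# Look up the NodePort exposed for broker 0
kubectl get svc kafka-0-external -o jsonpath='{.spec.ports[0].nodePort}'

# Produce and then consume a test message through <node-ip>:<node-port>
kafka-console-producer.sh --bootstrap-server 192.168.1.100:32743 --topic test
kafka-console-consumer.sh --bootstrap-server 192.168.1.100:32743 --topic test --from-beginning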

 
