前情概要:历经了太多的坑,从简单的到困难的,本文主要是为大家尽可能完整地展现 Milvus 安装部署流程中会遇到的坑!
如果你处在 2024年7月15日 前后的时间段,你会发现 docker 直接下载镜像会失败!这个问题没有常规解法,请使用魔法(代理)提前拉取镜像。

官方部署网址:https://milvus.io/docs/install_cluster-helm.md
1.如果你想要直接部署,不对接外部组件,直接使用在线部署,当前要注意上面的问题:使用魔法先把需要的镜像下载下来!
镜像如下:

milvusdb/milvus:v2.4.5 
milvusdb/milvus-config-tool:v0.1.2
docker.io/milvusdb/etcd:3.5.5-r4
zilliz/attu:v2.3.10
apachepulsar/pulsar:2.8.2
apachepulsar/pulsar-manager:v0.2.0 (注意:下方 values.yaml 中 images.pulsar_manager.tag 为 v0.1.0,两处版本不一致,请确认实际使用的 tag 并保持一致)
milvusdb/heaptrack:v0.1.0

下面是对接外部 etcd、外部 pulsar、外部 S3 的配置文件:
value.yaml

## Enable or disable Milvus Cluster mode
cluster:
  enabled: true

image:
  all:
    repository: milvusdb/milvus
    tag: v2.4.5
    pullPolicy: IfNotPresent
    ## Optionally specify an array of imagePullSecrets.
    ## Secrets must be manually created in the namespace.
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
    ##
    # pullSecrets:
    #   - myRegistryKeySecretName
  tools:
    repository: milvusdb/milvus-config-tool
    tag: v0.1.2
    pullPolicy: IfNotPresent

# Global node selector
# If set, this will apply to all milvus components
# Individual components can be set to a different node selector
nodeSelector: {}

# Global tolerations
# If set, this will apply to all milvus components
# Individual components can be set to a different tolerations
tolerations: []

# Global affinity
# If set, this will apply to all milvus components
# Individual components can be set to a different affinity
affinity: {}

# Global labels and annotations
# If set, this will apply to all milvus components
labels: {}
annotations: {}

# Extra configs for milvus.yaml
# If set, this config will merge into milvus.yaml
# Please follow the config structure in the milvus.yaml
# at https://github.com/milvus-io/milvus/blob/master/configs/milvus.yaml
# Note: this config will be the top priority which will override the config
# in the image and helm chart.
extraConfigFiles:
  user.yaml: |+
    #    For example enable rest http for milvus proxy
    #    proxy:
    #      http:
    #        enabled: true
    #      maxUserNum: 100
    #      maxRoleNum: 10
    ##  Enable tlsMode and set the tls cert and key
    #  tls:
    #    serverPemPath: /etc/milvus/certs/tls.crt
    #    serverKeyPath: /etc/milvus/certs/tls.key
    #   common:
    #     security:
    #       tlsMode: 1

## Expose the Milvus service to be accessed from outside the cluster (LoadBalancer service).
## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it.
## ref: http://kubernetes.io/docs/user-guide/services/
##
service:
  type: NodePort
  port: 19530
  portName: milvus
  nodePort: ""
  annotations: {}
  labels: {}

  ## List of IP addresses at which the Milvus service is available
  ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
  ##
  externalIPs: []
  #   - externalIp1

  # LoadBalancerSourcesRange is a list of allowed CIDR values, which are combined with ServicePort to
  # set allowed inbound rules on the security group assigned to the master load balancer
  loadBalancerSourceRanges:
  - 0.0.0.0/0
  # Optionally assign a known public LB IP
  # loadBalancerIP: 1.2.3.4

ingress:
  enabled: false
  annotations:
    # Annotation example: set nginx ingress type
    # kubernetes.io/ingress.class: nginx
    nginx.ingress.kubernetes.io/backend-protocol: GRPC
    nginx.ingress.kubernetes.io/listen-ports-ssl: '[19530]'
    nginx.ingress.kubernetes.io/proxy-body-size: 4m
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
  labels: {}
  rules:
    - host: "milvus-example.local"
      path: "/"
      pathType: "Prefix"
    # - host: "milvus-example2.local"
    #   path: "/otherpath"
    #   pathType: "Prefix"
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - milvus-example.local

serviceAccount:
  create: false
  name:
  annotations:
  labels:

metrics:
  enabled: true

  serviceMonitor:
    # Set this to `true` to create ServiceMonitor for Prometheus operator
    enabled: false
    interval: "30s"
    scrapeTimeout: "10s"
    # Additional labels that can be used so ServiceMonitor will be discovered by Prometheus
    additionalLabels: {}

livenessProbe:
  enabled: true
  initialDelaySeconds: 90
  periodSeconds: 30
  timeoutSeconds: 5
  successThreshold: 1
  failureThreshold: 5

readinessProbe:
  enabled: true
  initialDelaySeconds: 90
  periodSeconds: 10
  timeoutSeconds: 5
  successThreshold: 1
  failureThreshold: 5

log:
  level: "info"
  file:
    maxSize: 300    # MB
    maxAge: 10    # day
    maxBackups: 20
  format: "text"    # text/json

  persistence:
    mountPath: "/milvus/logs"
    ## If true, create/use a Persistent Volume Claim
    ## If false, use emptyDir
    ##
    enabled: false
    annotations:
      helm.sh/resource-policy: keep
    persistentVolumeClaim:
      existingClaim: ""
      ## Milvus Logs Persistent Volume Storage Class
      ## If defined, storageClassName: <storageClass>
      ## If set to "-", storageClassName: "", which disables dynamic provisioning
      ## If undefined (the default) or set to null, no storageClassName spec is
      ##   set, choosing the default provisioner.
      ## ReadWriteMany access mode required for milvus cluster.
      ##
      storageClass:
      accessModes: ReadWriteMany
      size: 10Gi
      subPath: ""

## Heaptrack traces all memory allocations and annotates these events with stack traces.
## See more: https://github.com/KDE/heaptrack
## Enable heaptrack in production is not recommended.
heaptrack:
  image:
    repository: milvusdb/heaptrack
    tag: v0.1.0
    pullPolicy: IfNotPresent

## Standalone-mode settings.
## NOTE(review): cluster.enabled is true at the top of this file, so this
## standalone section is inactive for this deployment — kept for reference.
standalone:
  replicas: 1  # Run standalone mode with replication disabled
  resources: {}
  # Set local storage size in resources
  # resources:
  #   limits:
  #     ephemeral-storage: 100Gi
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  disk:
    enabled: true
    size:
      enabled: false  # Enable local storage size limit
  profiling:
    enabled: false  # Enable live profiling

  ## Default message queue for milvus standalone
  ## Supported value: rocksmq, natsmq, pulsar and kafka
  messageQueue: rocksmq
  persistence:
    mountPath: "/var/lib/milvus"
    ## If true, Milvus will create/use a Persistent Volume Claim
    ## If false, use emptyDir
    ##
    enabled: true
    annotations:
      # Keep the PVC when the Helm release is deleted.
      helm.sh/resource-policy: keep
    persistentVolumeClaim:
      existingClaim: ""
      ## Milvus Persistent Volume Storage Class
      ## If defined, storageClassName: <storageClass>
      ## If set to "-", storageClassName: "", which disables dynamic provisioning
      ## If undefined (the default) or set to null, no storageClassName spec is
      ##   set, choosing the default provisioner.
      ##
      # NOTE(review): an S3-backed storage class may not provide the POSIX
      # filesystem semantics local rocksmq data expects — confirm the CSI
      # driver's capabilities before relying on it.
      storageClass: "csi-driver-s3"
      accessModes: ReadWriteOnce
      size: 50Gi
      subPath: ""

proxy:
  enabled: true
  # You can set the number of replicas to -1 to remove the replicas field in case you want to use HPA
  replicas: 1
  resources: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: false  # Enable live profiling
  http:
    enabled: true  # whether to enable http rest server
    debugMode:
      enabled: false
  # Mount a TLS secret into proxy pod
  tls:
    enabled: false
## when enabling proxy.tls, all items below should be uncommented and the key and crt values should be populated.
#    enabled: true
#    secretName: milvus-tls
## expecting base64 encoded values here: i.e. $(cat tls.crt | base64 -w 0) and $(cat tls.key | base64 -w 0)
#    key: LS0tLS1CRUdJTiBQU--REDUCT
#    crt: LS0tLS1CRUdJTiBDR--REDUCT
#  volumes:
#  - secret:
#      secretName: milvus-tls
#    name: milvus-tls
#  volumeMounts:
#  - mountPath: /etc/milvus/certs/
#    name: milvus-tls
  # Deployment strategy, default is RollingUpdate
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment
  strategy: {}

rootCoordinator:
  enabled: true
  # You can set the number of replicas greater than 1, only if enable active standby
  replicas: 1  # Run Root Coordinator mode with replication disabled
  resources: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: false  # Enable live profiling
  activeStandby:
    enabled: true  # Enable active-standby when you set multiple replicas for root coordinator
  # Deployment strategy, default is RollingUpdate
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment
  strategy: {}

  service:
    port: 53100
    annotations: {}
    labels: {}
    clusterIP: ""

queryCoordinator:
  enabled: true
  # You can set the number of replicas greater than 1, only if enable active standby
  replicas: 1  # Run Query Coordinator mode with replication disabled
  resources: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: false  # Enable live profiling
  activeStandby:
    enabled: true  # Enable active-standby when you set multiple replicas for query coordinator
  # Deployment strategy, default is RollingUpdate
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment
  strategy: {}

  service:
    port: 19531
    annotations: {}
    labels: {}
    clusterIP: ""

queryNode:
  enabled: true
  # You can set the number of replicas to -1 to remove the replicas field in case you want to use HPA
  replicas: 1
  resources: {}
  # Set local storage size in resources
  # resources:
  #   limits:
  #     ephemeral-storage: 100Gi
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  disk:
    enabled: true  # Enable querynode load disk index, and search on disk index
    size:
      enabled: false  # Enable local storage size limit
  profiling:
    enabled: false  # Enable live profiling
  # Deployment strategy, default is RollingUpdate
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment
  strategy: {}

indexCoordinator:
  enabled: true
  # You can set the number of replicas greater than 1, only if enable active standby
  replicas: 1   # Run Index Coordinator mode with replication disabled
  resources: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: false  # Enable live profiling
  activeStandby:
    enabled: true  # Enable active-standby when you set multiple replicas for index coordinator
  # Deployment strategy, default is RollingUpdate
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment
  strategy: {}

  service:
    port: 31000
    annotations: {}
    labels: {}
    clusterIP: ""

indexNode:
  enabled: true
  # You can set the number of replicas to -1 to remove the replicas field in case you want to use HPA
  replicas: 1
  resources: {}
  # Set local storage size in resources
  # limits:
  #    ephemeral-storage: 100Gi
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: false  # Enable live profiling
  disk:
    enabled: true  # Enable index node build disk vector index
    size:
      enabled: false  # Enable local storage size limit
  # Deployment strategy, default is RollingUpdate
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment
  strategy: {}

dataCoordinator:
  enabled: true
  # You can set the number of replicas greater than 1, only if enable active standby
  replicas: 1           # Run Data Coordinator mode with replication disabled
  resources: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: false  # Enable live profiling
  activeStandby:
    enabled: true  # Enable active-standby when you set multiple replicas for data coordinator
  # Deployment strategy, default is RollingUpdate
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment
  strategy: {}

  service:
    port: 13333
    annotations: {}
    labels: {}
    clusterIP: ""

dataNode:
  enabled: true
  # You can set the number of replicas to -1 to remove the replicas field in case you want to use HPA
  replicas: 1
  resources: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: false  # Enable live profiling
  # Deployment strategy, default is RollingUpdate
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment
  strategy: {}

## mixCoordinator contains all coord
## If you want to use mixcoord, enable this and disable all of other coords
mixCoordinator:
  enabled: false
  # You can set the number of replicas greater than 1, only if enable active standby
  replicas: 1           # Run Mixture Coordinator mode with replication disabled
  resources: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: false  # Enable live profiling
  activeStandby:
    enabled: true  # Enable active-standby when you set multiple replicas for Mixture coordinator
  # Deployment strategy, default is RollingUpdate
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment
  strategy: {}

  service:
    annotations: {}
    labels: {}
    clusterIP: ""

attu:
  enabled: true
  name: attu
  image:
    repository: zilliz/attu
    tag: v2.3.10
    pullPolicy: IfNotPresent
  service:
    annotations: {}
    labels: {}
    type: NodePort
    port: 3000
    # loadBalancerIP: ""
  resources: {}
  podLabels: {}
  ingress:
    enabled: false
    annotations: {}
    # Annotation example: set nginx ingress type
    # kubernetes.io/ingress.class: nginx
    labels: {}
    hosts:
      - milvus-attu.local
    tls: []
    #  - secretName: chart-attu-tls
    #    hosts:
    #      - milvus-attu.local


## Configuration values for the minio dependency
## ref: https://github.com/zilliztech/milvus-helm/blob/master/charts/minio/README.md
##

minio:
  enabled: false
  name: minio
  mode: distributed
  image:
    tag: "RELEASE.2023-03-20T20-16-18Z"
    pullPolicy: IfNotPresent
  accessKey: xxx
  secretKey: xxx
  existingSecret: ""
  bucketName: "milvus-bucket"
  rootPath: file
  useIAM: false
  iamEndpoint: ""
  region: ""
  useVirtualHost: false
  podDisruptionBudget:
    enabled: false
  resources:
    requests:
      memory: 2Gi

  service:
    type: ClusterIP
    port: 9000

  persistence:
    enabled: true
    existingClaim: ""
    storageClass: "csi-driver-s3"
    accessMode: ReadWriteOnce
    size: 500Gi

  livenessProbe:
    enabled: true
    initialDelaySeconds: 5
    periodSeconds: 5
    timeoutSeconds: 5
    successThreshold: 1
    failureThreshold: 5

  readinessProbe:
    enabled: true
    initialDelaySeconds: 5
    periodSeconds: 5
    timeoutSeconds: 1
    successThreshold: 1
    failureThreshold: 5

  startupProbe:
    enabled: true
    initialDelaySeconds: 0
    periodSeconds: 10
    timeoutSeconds: 5
    successThreshold: 1
    failureThreshold: 60

## Configuration values for the etcd dependency
## ref: https://artifacthub.io/packages/helm/bitnami/etcd
##

## In-cluster etcd dependency (bitnami-style chart values).
## Disabled here because this deployment points at an external etcd
## (see the externalEtcd section near the end of this file).
etcd:
  enabled: false
  name: etcd
  # Three replicas give etcd a quorum; keep an odd number.
  replicaCount: 3
  pdb:
    create: false
  image:
    repository: "milvusdb/etcd"
    tag: "3.5.5-r4"
    pullPolicy: IfNotPresent

  service:
    type: ClusterIP
    port: 2379
    peerPort: 2380

  auth:
    rbac:
      enabled: false

  persistence:
    enabled: true
    # NOTE(review): etcd requires fsync-capable block/file storage; an
    # S3-backed storage class is usually unsuitable for etcd data files —
    # confirm this CSI driver provides POSIX semantics before production use.
    storageClass: "csi-driver-s3"
    accessMode: ReadWriteOnce
    size: 10Gi

  ## Change default timeout periods to mitigate zombie probe processes
  livenessProbe:
    enabled: true
    timeoutSeconds: 10

  readinessProbe:
    enabled: true
    periodSeconds: 20
    timeoutSeconds: 10

  ## Enable auto compaction
  ## compaction by every 1000 revision
  ##
  autoCompactionMode: revision
  autoCompactionRetention: "1000"

  ## Increase default quota to 4G
  ##
  extraEnvVars:
  - name: ETCD_QUOTA_BACKEND_BYTES
    value: "4294967296"
  - name: ETCD_HEARTBEAT_INTERVAL
    value: "500"
  - name: ETCD_ELECTION_TIMEOUT
    value: "2500"

## Configuration values for the pulsar dependency
## ref: https://github.com/apache/pulsar-helm-chart
##

pulsar:
  enabled: false
  name: pulsar

  fullnameOverride: ""
  persistence: true

  maxMessageSize: "5242880"  # 5 * 1024 * 1024 Bytes, Maximum size of each message in pulsar.

  rbac:
    enabled: false
    psp: false
    limit_to_namespace: true

  affinity:
    anti_affinity: false

## enableAntiAffinity: no

  components:
    zookeeper: true
    bookkeeper: true
    # bookkeeper - autorecovery
    autorecovery: true
    broker: true
    functions: false
    proxy: true
    toolset: false
    pulsar_manager: false

  monitoring:
    prometheus: false
    grafana: false
    node_exporter: false
    alert_manager: false

  images:
    broker:
      repository: apachepulsar/pulsar
      pullPolicy: IfNotPresent
      tag: 2.8.2
    autorecovery:
      repository: apachepulsar/pulsar
      tag: 2.8.2
      pullPolicy: IfNotPresent
    zookeeper:
      repository: apachepulsar/pulsar
      pullPolicy: IfNotPresent
      tag: 2.8.2
    bookie:
      repository: apachepulsar/pulsar
      pullPolicy: IfNotPresent
      tag: 2.8.2
    proxy:
      repository: apachepulsar/pulsar
      pullPolicy: IfNotPresent
      tag: 2.8.2
    pulsar_manager:
      repository: apachepulsar/pulsar-manager
      pullPolicy: IfNotPresent
      # NOTE(review): the image list at the top of this document says v0.2.0
      # while the chart value here is v0.1.0 — confirm which tag is actually
      # pulled and keep the two in sync.
      tag: v0.1.0

  zookeeper:
    resources:
      requests:
        memory: 1024Mi
        cpu: 0.3
    configData:
      PULSAR_MEM: >
        -Xms1024m
        -Xmx1024m
      PULSAR_GC: >
         -Dcom.sun.management.jmxremote
         -Djute.maxbuffer=10485760
         -XX:+ParallelRefProcEnabled
         -XX:+UnlockExperimentalVMOptions
         -XX:+DoEscapeAnalysis
         -XX:+DisableExplicitGC
         -XX:+PerfDisableSharedMem
         -Dzookeeper.forceSync=no
    pdb:
      usePolicy: false

  bookkeeper:
    replicaCount: 3
    volumes:
      journal:
        name: journal
        size: 100Gi
      ledgers:
        name: ledgers
        size: 200Gi
    resources:
      requests:
        memory: 2048Mi
        cpu: 1
    configData:
      PULSAR_MEM: >
        -Xms4096m
        -Xmx4096m
        -XX:MaxDirectMemorySize=8192m
      PULSAR_GC: >
        -Dio.netty.leakDetectionLevel=disabled
        -Dio.netty.recycler.linkCapacity=1024
        -XX:+UseG1GC -XX:MaxGCPauseMillis=10
        -XX:+ParallelRefProcEnabled
        -XX:+UnlockExperimentalVMOptions
        -XX:+DoEscapeAnalysis
        -XX:ParallelGCThreads=32
        -XX:ConcGCThreads=32
        -XX:G1NewSizePercent=50
        -XX:+DisableExplicitGC
        -XX:-ResizePLAB
        -XX:+ExitOnOutOfMemoryError
        -XX:+PerfDisableSharedMem
        -XX:+PrintGCDetails
      nettyMaxFrameSizeBytes: "104867840"
    pdb:
      usePolicy: false

  broker:
    component: broker
    podMonitor:
      enabled: false
    replicaCount: 1
    resources:
      requests:
        memory: 4096Mi
        cpu: 1.5
    configData:
      PULSAR_MEM: >
        -Xms4096m
        -Xmx4096m
        -XX:MaxDirectMemorySize=8192m
      PULSAR_GC: >
        -Dio.netty.leakDetectionLevel=disabled
        -Dio.netty.recycler.linkCapacity=1024
        -XX:+ParallelRefProcEnabled
        -XX:+UnlockExperimentalVMOptions
        -XX:+DoEscapeAnalysis
        -XX:ParallelGCThreads=32
        -XX:ConcGCThreads=32
        -XX:G1NewSizePercent=50
        -XX:+DisableExplicitGC
        -XX:-ResizePLAB
        -XX:+ExitOnOutOfMemoryError
      maxMessageSize: "104857600"
      defaultRetentionTimeInMinutes: "10080"
      defaultRetentionSizeInMB: "-1"
      backlogQuotaDefaultLimitGB: "8"
      ttlDurationDefaultInSeconds: "259200"
      subscriptionExpirationTimeMinutes: "3"
      backlogQuotaDefaultRetentionPolicy: producer_exception
    pdb:
      usePolicy: false

  autorecovery:
    resources:
      requests:
        memory: 512Mi
        cpu: 1

  proxy:
    replicaCount: 1
    podMonitor:
      enabled: false
    resources:
      requests:
        memory: 2048Mi
        cpu: 1
    service:
      type: ClusterIP
    ports:
      pulsar: 6650
    configData:
      PULSAR_MEM: >
        -Xms2048m -Xmx2048m
      PULSAR_GC: >
        -XX:MaxDirectMemorySize=2048m
      httpNumThreads: "100"
    pdb:
      usePolicy: false

  pulsar_manager:
    service:
      type: ClusterIP

  pulsar_metadata:
    component: pulsar-init
    image:
      # the image used for running `pulsar-cluster-initialize` job
      repository: apachepulsar/pulsar
      tag: 2.8.2


## Configuration values for the kafka dependency
## ref: https://artifacthub.io/packages/helm/bitnami/kafka
##

kafka:
  enabled: false
  name: kafka
  replicaCount: 3
  image:
    repository: bitnami/kafka
    tag: 3.1.0-debian-10-r52
  ## Increase graceful termination for kafka graceful shutdown
  terminationGracePeriodSeconds: "90"
  pdb:
    create: false

  ## Enable startup probe to prevent pod restart during recovering
  startupProbe:
    enabled: true

  ## Kafka Java Heap size
  heapOpts: "-Xmx4096m -Xms4096m"
  # NOTE(review): the leading underscore in the two values below looks like
  # the chart's workaround for Helm rendering large integers in scientific
  # notation (the chart strips the "_" prefix) — confirm against the chart
  # templates before changing the format.
  maxMessageBytes: _10485760
  defaultReplicationFactor: 3
  offsetsTopicReplicationFactor: 3
  ## Only enable time based log retention
  logRetentionHours: 168
  logRetentionBytes: _-1
  extraEnvVars:
  - name: KAFKA_CFG_MAX_PARTITION_FETCH_BYTES
    value: "5242880"
  - name: KAFKA_CFG_MAX_REQUEST_SIZE
    value: "5242880"
  - name: KAFKA_CFG_REPLICA_FETCH_MAX_BYTES
    value: "10485760"
  - name: KAFKA_CFG_FETCH_MESSAGE_MAX_BYTES
    value: "5242880"
  - name: KAFKA_CFG_LOG_ROLL_HOURS
    value: "24"

  persistence:
    enabled: true
    storageClass:
    accessMode: ReadWriteOnce
    size: 300Gi

  metrics:
    ## Prometheus Kafka exporter: exposes complimentary metrics to JMX exporter
    kafka:
      enabled: false
      image:
        repository: bitnami/kafka-exporter
        tag: 1.4.2-debian-10-r182

    ## Prometheus JMX exporter: exposes the majority of Kafkas metrics
    jmx:
      enabled: false
      image:
        repository: bitnami/jmx-exporter
        tag: 0.16.1-debian-10-r245

    ## To enable serviceMonitor, you must enable either kafka exporter or jmx exporter.
    ## And you can enable them both
    serviceMonitor:
      enabled: false

  service:
    type: ClusterIP
    ports:
      client: 9092

  zookeeper:
    enabled: true
    replicaCount: 3

###################################
# External S3
# - these configs are only used when `externalS3.enabled` is true
###################################
## External S3-compatible object storage used by Milvus.
## Active: enabled is true here and the bundled minio above is disabled.
externalS3:
  enabled: true
  # "xxxx"/"xx" values below are placeholders — replace them before deploying.
  host: "xxxx"
  # Kept quoted so YAML parses the port as a string.
  port: "9000"
  # NOTE(review): avoid committing real credentials to version control;
  # prefer injecting them at deploy time.
  accessKey: "xx"
  secretKey: "xx"
  useSSL: false
  bucketName: "milvus-dev"
  rootPath: ""
  useIAM: false
  cloudProvider: "aws"
  iamEndpoint: ""
  region: ""
  useVirtualHost: false

###################################
# GCS Gateway
# - these configs are only used when `minio.gcsgateway.enabled` is true
###################################
externalGcs:
  bucketName: ""

###################################
# External etcd
# - these configs are only used when `externalEtcd.enabled` is true
###################################
## External etcd used by Milvus (active because enabled is true; the
## in-cluster etcd dependency above is disabled).
externalEtcd:
  enabled: true

  ## the endpoints of the external etcd
  ## NOTE: "xxxx" is a placeholder — replace it with the real etcd host.
  ## The host:port pair is quoted so YAML always reads it as a plain string
  ## (unquoted digits-and-colons values can be misread by YAML 1.1 parsers).
  endpoints:
    - "xxxx:23790"

###################################
# External pulsar
# - these configs are only used when `externalPulsar.enabled` is true
###################################
## External pulsar used by Milvus (active because enabled is true; the
## in-cluster pulsar dependency above is disabled).
externalPulsar:
  enabled: true
  # "xxx"/"xx" values below are placeholders — replace them before deploying.
  host: "xxx"
  port: 30012
  maxMessageSize: "5242880"  # 5 * 1024 * 1024 Bytes, Maximum size of each message in pulsar.
  tenant: "xx"
  namespace: "xxx"
  ## Token authentication for the pulsar client.
  authPlugin: "org.apache.pulsar.client.impl.auth.AuthenticationToken"
  ## The whole value is quoted so YAML keeps it as one string; the token
  ## itself must NOT carry extra quotes — the previous `token:"xxx"` form
  ## would send the literal characters `"xxx"` (quotes included) as the token.
  authParams: "token:xxx"

###################################
# External kafka
# - these configs are only used when `externalKafka.enabled` is true
# - note that the following are just examples, you should confirm the
#   value of brokerList and mechanisms according to the actual external
#   Kafka configuration. E.g. If you select the AWS MSK, the configuration
#   should look something like this:
#   externalKafka:
#     enabled: true
#     brokerList: "xxxx:9096"
#     securityProtocol: SASL_SSL
#     sasl:
#       mechanisms: SCRAM-SHA-512
#       password: "xxx"
#       username: "xxx"
###################################
externalKafka:
  enabled: false
  brokerList: localhost:9092
  securityProtocol: SASL_SSL
  sasl:
    mechanisms: PLAIN
    username: ""
    password: ""

下面是由 helm template 渲染得到的 k8s 可执行清单 milvus_manifest.yaml(其中 xxxx 均为占位符;修改 value.yaml 后需重新渲染本清单,注意清单示例中 pulsar 的 tenant/namespace 与上面 values 中的占位值并不一致):

---
# Source: milvus/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: my-release-milvus
data:
  default.yaml: |+
    # Copyright (C) 2019-2021 Zilliz. All rights reserved.
    #
    # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
    # with the License. You may obtain a copy of the License at
    #
    # http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software distributed under the License
    # is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
    # or implied. See the License for the specific language governing permissions and limitations under the License.
    
    etcd:
      endpoints:
        - xxxx:23790
    
    metastore:
      type: etcd
    
    minio:
      address: xxxx
      port: 9000
      accessKeyID: minioadmin
      secretAccessKey: minioadmin
      useSSL: false
      bucketName: milvus-dev
      rootPath:
      useIAM: false
      cloudProvider: aws
      iamEndpoint:
      region:
      useVirtualHost: false

    mq:
      type: pulsar

    messageQueue: pulsar

    pulsar:
      address: xxx
      port: 6650
      maxMessageSize: 5242880
      tenant: "my-tenant"
      namespace: my-namespace

    
    rootCoord:
      address: my-release-milvus-rootcoord
      port: 53100
      enableActiveStandby: true  # Enable rootcoord active-standby
    
    proxy:
      port: 19530
      internalPort: 19529
    
    queryCoord:
      address: my-release-milvus-querycoord
      port: 19531
    
      enableActiveStandby: true  # Enable querycoord active-standby
    
    queryNode:
      port: 21123
      enableDisk: true # Enable querynode load disk index, and search on disk index
    
    indexCoord:
      address: my-release-milvus-indexcoord
      port: 31000
      enableActiveStandby: true  # Enable indexcoord active-standby
    
    indexNode:
      port: 21121
      enableDisk: true # Enable index node build disk vector index
    
    dataCoord:
      address: my-release-milvus-datacoord
      port: 13333
      enableActiveStandby: true  # Enable datacoord active-standby
    
    dataNode:
      port: 21124
    
    log:
      level: info
      file:
        rootPath: ""
        maxSize: 300
        maxAge: 10
        maxBackups: 20
      format: text
  user.yaml: |-
    #    For example enable rest http for milvus proxy
    #    proxy:
    #      http:
    #        enabled: true
    #      maxUserNum: 100
    #      maxRoleNum: 10
    ##  Enable tlsMode and set the tls cert and key
    #  tls:
    #    serverPemPath: /etc/milvus/certs/tls.crt
    #    serverKeyPath: /etc/milvus/certs/tls.key
    #   common:
    #     security:
    #       tlsMode: 1
---
# Source: milvus/templates/attu-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: my-release-milvus-attu
  labels:
    helm.sh/chart: milvus-4.1.34
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.4.5"
    app.kubernetes.io/managed-by: Helm
    component: "attu"
spec:
  type: NodePort
  ports:
    - name: attu
      protocol: TCP
      port: 3000
      targetPort: 3000
  selector:
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    component: "attu"
---
# Source: milvus/templates/datacoord-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: my-release-milvus-datacoord
  labels:
    helm.sh/chart: milvus-4.1.34
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.4.5"
    app.kubernetes.io/managed-by: Helm
    component: "datacoord"
spec:
  type: ClusterIP
  ports:
    - name: datacoord
      port: 13333
      protocol: TCP
      targetPort: datacoord
    - name: metrics
      protocol: TCP
      port: 9091
      targetPort: metrics
  selector:
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    component: "datacoord"
---
# Source: milvus/templates/datanode-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: my-release-milvus-datanode
  labels:
    helm.sh/chart: milvus-4.1.34
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.4.5"
    app.kubernetes.io/managed-by: Helm
    component: "datanode"
spec:
  type: ClusterIP
  clusterIP: None
  ports:
    - name: metrics
      protocol: TCP
      port: 9091
      targetPort: metrics
  selector:
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    component: "datanode"
---
# Source: milvus/templates/indexcoord-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: my-release-milvus-indexcoord
  labels:
    helm.sh/chart: milvus-4.1.34
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.4.5"
    app.kubernetes.io/managed-by: Helm
    component: "indexcoord"
spec:
  type: ClusterIP
  ports:
    - name: indexcoord
      port: 31000
      protocol: TCP
      targetPort: indexcoord
    - name: metrics
      protocol: TCP
      port: 9091
      targetPort: metrics
  selector:
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    component: "indexcoord"
---
# Source: milvus/templates/indexnode-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: my-release-milvus-indexnode
  labels:
    helm.sh/chart: milvus-4.1.34
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.4.5"
    app.kubernetes.io/managed-by: Helm
    component: "indexnode"
spec:
  type: ClusterIP
  clusterIP: None
  ports:
    - name: metrics
      protocol: TCP
      port: 9091
      targetPort: metrics
  selector:
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    component: "indexnode"
---
# Source: milvus/templates/querycoord-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: my-release-milvus-querycoord
  labels:
    helm.sh/chart: milvus-4.1.34
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.4.5"
    app.kubernetes.io/managed-by: Helm
    component: "querycoord"
spec:
  type: ClusterIP
  ports:
    - name: querycoord
      port: 19531
      protocol: TCP
      targetPort: querycoord
    - name: metrics
      protocol: TCP
      port: 9091
      targetPort: metrics
  selector:
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    component: "querycoord"
---
# Source: milvus/templates/querynode-svc.yaml
# Headless service (clusterIP: None) for the querynode pods; exposes only
# the Prometheus metrics port (9091).
apiVersion: v1
kind: Service
metadata:
  name: my-release-milvus-querynode
  labels:
    helm.sh/chart: milvus-4.1.34
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.4.5"
    app.kubernetes.io/managed-by: Helm
    component: "querynode"
spec:
  type: ClusterIP
  clusterIP: None
  ports:
    - name: metrics
      protocol: TCP
      port: 9091
      targetPort: metrics
  selector:
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    component: "querynode"
---
# Source: milvus/templates/rootcoord-svc.yaml
# Cluster-internal service for rootcoord: RPC port 53100 plus metrics 9091.
apiVersion: v1
kind: Service
metadata:
  name: my-release-milvus-rootcoord
  labels:
    helm.sh/chart: milvus-4.1.34
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.4.5"
    app.kubernetes.io/managed-by: Helm
    component: "rootcoord"
spec:
  type: ClusterIP
  ports:
    - name: rootcoord
      port: 53100
      protocol: TCP
      targetPort: rootcoord
    - name: metrics
      protocol: TCP
      port: 9091
      targetPort: metrics
  selector:
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    component: "rootcoord"
---
# Source: milvus/templates/service.yaml
# Main entry-point service, selecting the proxy pods. NodePort type so the
# gRPC port 19530 (and metrics 9091) are reachable from outside the cluster.
apiVersion: v1
kind: Service
metadata:
  name: my-release-milvus
  labels:
    helm.sh/chart: milvus-4.1.34
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.4.5"
    app.kubernetes.io/managed-by: Helm
    component: "proxy"
spec:
  type: NodePort
  ports:
    - name: milvus
      port: 19530
      protocol: TCP
      targetPort: milvus
    - name: metrics
      protocol: TCP
      port: 9091
      targetPort: metrics
  selector:
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    component: "proxy"
---
# Source: milvus/templates/attu-deployment.yaml
# Attu web UI; points at the main Milvus service (port 19530) via MILVUS_URL.
# Cleanup: removed trailing-whitespace-only lines left by the template
# renderer and collapsed the empty resources mapping onto one line.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-release-milvus-attu
  labels:
    helm.sh/chart: milvus-4.1.34
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.4.5"
    app.kubernetes.io/managed-by: Helm
    component: "attu"
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: milvus
      app.kubernetes.io/instance: my-release
      component: "attu"
  template:
    metadata:
      labels:
        app.kubernetes.io/name: milvus
        app.kubernetes.io/instance: my-release
        component: "attu"
    spec:
      containers:
      - name: attu
        image: zilliz/attu:v2.3.10
        imagePullPolicy: IfNotPresent
        ports:
        - name: attu
          containerPort: 3000
          protocol: TCP
        env:
        # Address of the Milvus proxy service the UI connects to.
        - name: MILVUS_URL
          value: http://my-release-milvus:19530
        resources: {}
---
# Source: milvus/templates/datacoord-deployment.yaml
# Milvus datacoord component (runs `milvus run datacoord`).
# Cleanup: removed the bare `env:` key and the empty metadata `annotations:`
# key (both parse as null and are flagged by yamllint empty-values), plus
# trailing-whitespace-only lines left by the template renderer.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-release-milvus-datacoord
  labels:
    helm.sh/chart: milvus-4.1.34
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.4.5"
    app.kubernetes.io/managed-by: Helm
    component: "datacoord"
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: milvus
      app.kubernetes.io/instance: my-release
      component: "datacoord"
  template:
    metadata:
      labels:
        app.kubernetes.io/name: milvus
        app.kubernetes.io/instance: my-release
        component: "datacoord"
      annotations:
        # Pods roll automatically when the rendered milvus config changes.
        checksum/config: 4d919a6f7279f31d3f04198e9626ab7a0dec59a9e2d63b9b0758840233e77b8f
    spec:
      serviceAccountName: default
      initContainers:
      # Copies the run-helm.sh launcher and the config merge tool into the
      # shared "tools" emptyDir used by the main container's args below.
      - name: config
        command:
        - /cp
        - /run-helm.sh,/merge
        - /milvus/tools/run-helm.sh,/milvus/tools/merge
        image: "milvusdb/milvus-config-tool:v0.1.2"
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /milvus/tools
          name: tools
      containers:
      - name: datacoord
        image: "milvusdb/milvus:v2.4.5"
        imagePullPolicy: IfNotPresent
        args: [ "/milvus/tools/run-helm.sh", "milvus", "run", "datacoord" ]
        ports:
          - name: datacoord
            containerPort: 13333
            protocol: TCP
          - name: metrics
            containerPort: 9091
            protocol: TCP
        livenessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 30
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        resources: {}
        volumeMounts:
        - name: milvus-config
          mountPath: /milvus/configs/default.yaml
          subPath: default.yaml
          readOnly: true
        - name: milvus-config
          mountPath: /milvus/configs/user.yaml
          subPath: user.yaml
          readOnly: true
        - mountPath: /milvus/tools
          name: tools
      volumes:
      - name: milvus-config
        configMap:
          name: my-release-milvus
      - name: tools
        emptyDir: {}
---
# Source: milvus/templates/datanode-deployment.yaml
# Milvus datanode component (runs `milvus run datanode`).
# Cleanup: removed the bare `env:` key and the empty metadata `annotations:`
# key (both parse as null and are flagged by yamllint empty-values), plus
# trailing-whitespace-only lines left by the template renderer.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-release-milvus-datanode
  labels:
    helm.sh/chart: milvus-4.1.34
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.4.5"
    app.kubernetes.io/managed-by: Helm
    component: "datanode"
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: milvus
      app.kubernetes.io/instance: my-release
      component: "datanode"
  template:
    metadata:
      labels:
        app.kubernetes.io/name: milvus
        app.kubernetes.io/instance: my-release
        component: "datanode"
      annotations:
        # Pods roll automatically when the rendered milvus config changes.
        checksum/config: 4d919a6f7279f31d3f04198e9626ab7a0dec59a9e2d63b9b0758840233e77b8f
    spec:
      serviceAccountName: default
      initContainers:
      # Copies the run-helm.sh launcher and the config merge tool into the
      # shared "tools" emptyDir used by the main container's args below.
      - name: config
        command:
        - /cp
        - /run-helm.sh,/merge
        - /milvus/tools/run-helm.sh,/milvus/tools/merge
        image: "milvusdb/milvus-config-tool:v0.1.2"
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /milvus/tools
          name: tools
      containers:
      - name: datanode
        image: "milvusdb/milvus:v2.4.5"
        imagePullPolicy: IfNotPresent
        args: [ "/milvus/tools/run-helm.sh", "milvus", "run", "datanode" ]
        ports:
          - name: datanode
            containerPort: 21124
            protocol: TCP
          - name: metrics
            containerPort: 9091
            protocol: TCP
        livenessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 30
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        resources: {}
        volumeMounts:
        - name: milvus-config
          mountPath: /milvus/configs/default.yaml
          subPath: default.yaml
          readOnly: true
        - name: milvus-config
          mountPath: /milvus/configs/user.yaml
          subPath: user.yaml
          readOnly: true
        - mountPath: /milvus/tools
          name: tools
      volumes:
      - name: milvus-config
        configMap:
          name: my-release-milvus
      - name: tools
        emptyDir: {}
---
# Source: milvus/templates/indexcoord-deployment.yaml
# Milvus indexcoord component (runs `milvus run indexcoord`).
# Cleanup: removed the bare `env:` key and the empty metadata `annotations:`
# key (both parse as null and are flagged by yamllint empty-values), plus
# trailing-whitespace-only lines left by the template renderer.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-release-milvus-indexcoord
  labels:
    helm.sh/chart: milvus-4.1.34
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.4.5"
    app.kubernetes.io/managed-by: Helm
    component: "indexcoord"
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: milvus
      app.kubernetes.io/instance: my-release
      component: "indexcoord"
  template:
    metadata:
      labels:
        app.kubernetes.io/name: milvus
        app.kubernetes.io/instance: my-release
        component: "indexcoord"
      annotations:
        # Pods roll automatically when the rendered milvus config changes.
        checksum/config: 4d919a6f7279f31d3f04198e9626ab7a0dec59a9e2d63b9b0758840233e77b8f
    spec:
      serviceAccountName: default
      initContainers:
      # Copies the run-helm.sh launcher and the config merge tool into the
      # shared "tools" emptyDir used by the main container's args below.
      - name: config
        command:
        - /cp
        - /run-helm.sh,/merge
        - /milvus/tools/run-helm.sh,/milvus/tools/merge
        image: "milvusdb/milvus-config-tool:v0.1.2"
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /milvus/tools
          name: tools
      containers:
      - name: indexcoord
        image: "milvusdb/milvus:v2.4.5"
        imagePullPolicy: IfNotPresent
        args: [ "/milvus/tools/run-helm.sh", "milvus", "run", "indexcoord" ]
        ports:
          - name: indexcoord
            containerPort: 31000
            protocol: TCP
          - name: metrics
            containerPort: 9091
            protocol: TCP
        livenessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 30
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        resources: {}
        volumeMounts:
        - name: milvus-config
          mountPath: /milvus/configs/default.yaml
          subPath: default.yaml
          readOnly: true
        - name: milvus-config
          mountPath: /milvus/configs/user.yaml
          subPath: user.yaml
          readOnly: true
        - mountPath: /milvus/tools
          name: tools
      volumes:
      - name: milvus-config
        configMap:
          name: my-release-milvus
      - name: tools
        emptyDir: {}
---
# Source: milvus/templates/indexnode-deployment.yaml
# Milvus indexnode component (runs `milvus run indexnode`); mounts an extra
# emptyDir at /var/lib/milvus/data for local scratch data.
# Cleanup: removed the bare `env:` key and the empty metadata `annotations:`
# key (both parse as null and are flagged by yamllint empty-values), plus
# trailing-whitespace-only lines left by the template renderer.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-release-milvus-indexnode
  labels:
    helm.sh/chart: milvus-4.1.34
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.4.5"
    app.kubernetes.io/managed-by: Helm
    component: "indexnode"
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: milvus
      app.kubernetes.io/instance: my-release
      component: "indexnode"
  template:
    metadata:
      labels:
        app.kubernetes.io/name: milvus
        app.kubernetes.io/instance: my-release
        component: "indexnode"
      annotations:
        # Pods roll automatically when the rendered milvus config changes.
        checksum/config: 4d919a6f7279f31d3f04198e9626ab7a0dec59a9e2d63b9b0758840233e77b8f
    spec:
      serviceAccountName: default
      initContainers:
      # Copies the run-helm.sh launcher and the config merge tool into the
      # shared "tools" emptyDir used by the main container's args below.
      - name: config
        command:
        - /cp
        - /run-helm.sh,/merge
        - /milvus/tools/run-helm.sh,/milvus/tools/merge
        image: "milvusdb/milvus-config-tool:v0.1.2"
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /milvus/tools
          name: tools
      containers:
      - name: indexnode
        image: "milvusdb/milvus:v2.4.5"
        imagePullPolicy: IfNotPresent
        args: [ "/milvus/tools/run-helm.sh", "milvus", "run", "indexnode" ]
        ports:
          - name: indexnode
            containerPort: 21121
            protocol: TCP
          - name: metrics
            containerPort: 9091
            protocol: TCP
        livenessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 30
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        resources: {}
        volumeMounts:
        - name: milvus-config
          mountPath: /milvus/configs/default.yaml
          subPath: default.yaml
          readOnly: true
        - name: milvus-config
          mountPath: /milvus/configs/user.yaml
          subPath: user.yaml
          readOnly: true
        - mountPath: /milvus/tools
          name: tools
        - mountPath: /var/lib/milvus/data
          name: disk
      volumes:
      - name: milvus-config
        configMap:
          name: my-release-milvus
      - name: tools
        emptyDir: {}
      - name: disk
        emptyDir: {}
---
# Source: milvus/templates/proxy-deployment.yaml
# Milvus proxy component (runs `milvus run proxy`); backend of the NodePort
# service exposing gRPC 19530.
# Cleanup: removed the bare `env:` key and the empty metadata `annotations:`
# key (both parse as null and are flagged by yamllint empty-values), plus
# trailing-whitespace-only lines left by the template renderer.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-release-milvus-proxy
  labels:
    helm.sh/chart: milvus-4.1.34
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.4.5"
    app.kubernetes.io/managed-by: Helm
    component: "proxy"
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: milvus
      app.kubernetes.io/instance: my-release
      component: "proxy"
  template:
    metadata:
      labels:
        app.kubernetes.io/name: milvus
        app.kubernetes.io/instance: my-release
        component: "proxy"
      annotations:
        # Pods roll automatically when the rendered milvus config changes.
        checksum/config: 4d919a6f7279f31d3f04198e9626ab7a0dec59a9e2d63b9b0758840233e77b8f
    spec:
      serviceAccountName: default
      initContainers:
      # Copies the run-helm.sh launcher and the config merge tool into the
      # shared "tools" emptyDir used by the main container's args below.
      - name: config
        command:
        - /cp
        - /run-helm.sh,/merge
        - /milvus/tools/run-helm.sh,/milvus/tools/merge
        image: "milvusdb/milvus-config-tool:v0.1.2"
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /milvus/tools
          name: tools
      containers:
      - name: proxy
        image: "milvusdb/milvus:v2.4.5"
        imagePullPolicy: IfNotPresent
        args: [ "/milvus/tools/run-helm.sh", "milvus", "run", "proxy" ]
        ports:
          - name: milvus
            containerPort: 19530
            protocol: TCP
          - name: metrics
            containerPort: 9091
            protocol: TCP
        livenessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 30
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        resources: {}
        volumeMounts:
        - name: milvus-config
          mountPath: /milvus/configs/default.yaml
          subPath: default.yaml
          readOnly: true
        - name: milvus-config
          mountPath: /milvus/configs/user.yaml
          subPath: user.yaml
          readOnly: true
        - mountPath: /milvus/tools
          name: tools
      volumes:
      - name: milvus-config
        configMap:
          name: my-release-milvus
      - name: tools
        emptyDir: {}
---
# Source: milvus/templates/querycoord-deployment.yaml
# Milvus querycoord component (runs `milvus run querycoord`).
# Cleanup: removed the bare `env:` key and the empty metadata `annotations:`
# key (both parse as null and are flagged by yamllint empty-values), removed
# trailing-whitespace-only lines, and normalized the pod-template label order
# to match the sibling deployments (the stray blank line had moved
# `component` after it).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-release-milvus-querycoord
  labels:
    helm.sh/chart: milvus-4.1.34
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.4.5"
    app.kubernetes.io/managed-by: Helm
    component: "querycoord"
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: milvus
      app.kubernetes.io/instance: my-release
      component: "querycoord"
  template:
    metadata:
      labels:
        app.kubernetes.io/name: milvus
        app.kubernetes.io/instance: my-release
        component: "querycoord"
      annotations:
        # Pods roll automatically when the rendered milvus config changes.
        checksum/config: 4d919a6f7279f31d3f04198e9626ab7a0dec59a9e2d63b9b0758840233e77b8f
    spec:
      serviceAccountName: default
      initContainers:
      # Copies the run-helm.sh launcher and the config merge tool into the
      # shared "tools" emptyDir used by the main container's args below.
      - name: config
        command:
        - /cp
        - /run-helm.sh,/merge
        - /milvus/tools/run-helm.sh,/milvus/tools/merge
        image: "milvusdb/milvus-config-tool:v0.1.2"
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /milvus/tools
          name: tools
      containers:
      - name: querycoord
        image: "milvusdb/milvus:v2.4.5"
        imagePullPolicy: IfNotPresent
        args: [ "/milvus/tools/run-helm.sh", "milvus", "run", "querycoord" ]
        ports:
          - name: querycoord
            containerPort: 19531
            protocol: TCP
          - name: metrics
            containerPort: 9091
            protocol: TCP
        livenessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 30
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        resources: {}
        volumeMounts:
        - name: milvus-config
          mountPath: /milvus/configs/default.yaml
          subPath: default.yaml
          readOnly: true
        - name: milvus-config
          mountPath: /milvus/configs/user.yaml
          subPath: user.yaml
          readOnly: true
        - mountPath: /milvus/tools
          name: tools
      volumes:
      - name: milvus-config
        configMap:
          name: my-release-milvus
      - name: tools
        emptyDir: {}
---
# Source: milvus/templates/querynode-deployment.yaml
# Milvus querynode component (runs `milvus run querynode`); mounts an extra
# emptyDir at /var/lib/milvus/data for local scratch data.
# Cleanup: removed the bare `env:` key and the empty metadata `annotations:`
# key (both parse as null and are flagged by yamllint empty-values), plus
# trailing-whitespace-only lines left by the template renderer.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-release-milvus-querynode
  labels:
    helm.sh/chart: milvus-4.1.34
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.4.5"
    app.kubernetes.io/managed-by: Helm
    component: "querynode"
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: milvus
      app.kubernetes.io/instance: my-release
      component: "querynode"
  template:
    metadata:
      labels:
        app.kubernetes.io/name: milvus
        app.kubernetes.io/instance: my-release
        component: "querynode"
      annotations:
        # Pods roll automatically when the rendered milvus config changes.
        checksum/config: 4d919a6f7279f31d3f04198e9626ab7a0dec59a9e2d63b9b0758840233e77b8f
    spec:
      serviceAccountName: default
      initContainers:
      # Copies the run-helm.sh launcher and the config merge tool into the
      # shared "tools" emptyDir used by the main container's args below.
      - name: config
        command:
        - /cp
        - /run-helm.sh,/merge
        - /milvus/tools/run-helm.sh,/milvus/tools/merge
        image: "milvusdb/milvus-config-tool:v0.1.2"
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /milvus/tools
          name: tools
      containers:
      - name: querynode
        image: "milvusdb/milvus:v2.4.5"
        imagePullPolicy: IfNotPresent
        args: [ "/milvus/tools/run-helm.sh", "milvus", "run", "querynode" ]
        ports:
          - name: querynode
            containerPort: 21123
            protocol: TCP
          - name: metrics
            containerPort: 9091
            protocol: TCP
        livenessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 30
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        resources: {}
        volumeMounts:
        - name: milvus-config
          mountPath: /milvus/configs/default.yaml
          subPath: default.yaml
          readOnly: true
        - name: milvus-config
          mountPath: /milvus/configs/user.yaml
          subPath: user.yaml
          readOnly: true
        - mountPath: /milvus/tools
          name: tools
        - mountPath: /var/lib/milvus/data
          name: disk
      volumes:
      - name: milvus-config
        configMap:
          name: my-release-milvus
      - name: tools
        emptyDir: {}
      - name: disk
        emptyDir: {}
---
# Source: milvus/templates/rootcoord-deployment.yaml
# Milvus rootcoord component (runs `milvus run rootcoord`).
# Cleanup: removed the bare `env:` key and the empty metadata `annotations:`
# key (both parse as null and are flagged by yamllint empty-values), plus
# trailing-whitespace-only lines left by the template renderer.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-release-milvus-rootcoord
  labels:
    helm.sh/chart: milvus-4.1.34
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.4.5"
    app.kubernetes.io/managed-by: Helm
    component: "rootcoord"
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: milvus
      app.kubernetes.io/instance: my-release
      component: "rootcoord"
  template:
    metadata:
      labels:
        app.kubernetes.io/name: milvus
        app.kubernetes.io/instance: my-release
        component: "rootcoord"
      annotations:
        # Pods roll automatically when the rendered milvus config changes.
        checksum/config: 4d919a6f7279f31d3f04198e9626ab7a0dec59a9e2d63b9b0758840233e77b8f
    spec:
      serviceAccountName: default
      initContainers:
      # Copies the run-helm.sh launcher and the config merge tool into the
      # shared "tools" emptyDir used by the main container's args below.
      - name: config
        command:
        - /cp
        - /run-helm.sh,/merge
        - /milvus/tools/run-helm.sh,/milvus/tools/merge
        image: "milvusdb/milvus-config-tool:v0.1.2"
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /milvus/tools
          name: tools
      containers:
      - name: rootcoord
        image: "milvusdb/milvus:v2.4.5"
        imagePullPolicy: IfNotPresent
        args: [ "/milvus/tools/run-helm.sh", "milvus", "run", "rootcoord" ]
        ports:
          - name: rootcoord
            containerPort: 53100
            protocol: TCP
          - name: metrics
            containerPort: 9091
            protocol: TCP
        livenessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 30
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        resources: {}
        volumeMounts:
        - name: milvus-config
          mountPath: /milvus/configs/default.yaml
          subPath: default.yaml
          readOnly: true
        - name: milvus-config
          mountPath: /milvus/configs/user.yaml
          subPath: user.yaml
          readOnly: true
        - mountPath: /milvus/tools
          name: tools
      volumes:
      - name: milvus-config
        configMap:
          name: my-release-milvus
      - name: tools
        emptyDir: {}

1.pulsar 最好使用 milvus 自建的,非必要不对接外部 pulsar。在部署中观察下来,对接外部 pulsar 建立 topic 时会出现分区数为 2 的问题(milvus 为保证数据的顺序性,要求建立无分区的 topic),导致部署失败:2.2.13 版本可以部署但不可插入数据,2.4.5 版本则直接部署失败。如果非要对接外部 pulsar,就需要手动执行命令行建立 topic

bin/pulsar-admin topics create  persistent://my-tenant/my-namespace/by-dev-rootcoord-dml_0
...
bin/pulsar-admin topics create  persistent://my-tenant/my-namespace/by-dev-rootcoord-dml_15

之前为了对接外部pulsar,费了很大的力气,后期还是选择使用milvus 自建的。

对接pulsar values.yaml 需要对接自己的sc,我这边使用的是nfs作为pvc

  zookeeper:
    volumes:
      data:
        storageClassName: "nfs"
  bookkeeper:
    replicaCount: 3
    volumes:
      journal:
#        name: journal
#        size: 100Gi
        storageClassName: "nfs"
      ledgers:
#        name: ledgers
#        size: 200Gi
        storageClassName: "nfs"

2.etcd 个人认为对接外部的对于本地部署验证 和 后期单独维护 相对好一些,但是最终验证还是选择milvus 自建。 以下两种方式的差异:
理由:
1).外部etcd 虚拟机部署,对于运维小白可以直接使用命令,删除掉上次缓存数据

 etcdctl --endpoints=http://xxx:23790 get --from-key ""
 etcdctl --endpoints=http://xxx:23790 del --from-key ""

2).milvus 绑定自建,也需要配置sc,进行持久化卷绑定。如果想要多次创建验证、测试,需要先把pvc删除,否则会报错(如果不想增加运维难度,直接使用这种方式,但这是相对黑盒的,不太了解k8s 的话会很烦)

  persistence:
    enabled: true
    storageClass: "nfs"
    accessMode: ReadWriteOnce
    size: 10Gi
 etcdctl --endpoints=http://xxx:23790 get --from-key ""
 etcdctl --endpoints=http://xxxx:23790 del --from-key ""
helm search repo milvus --versions
可能会发现没有最新的版本,不要慌,直接根据官网下载就行了;或者从github 上下载最新的代码,拿到values.yaml,然后生成k8s可执行yaml。个人验证两种方式都可以,只是些许配置不同,并且要注意提前下载最新的values.yaml 所对应的镜像

根据value.yaml 生成k8s可执行yaml

helm template -f values.yaml  my-release milvus/milvus > milvus_manifest.yaml --debug
(需要等很久,而且可能会出现下载失败的问题,需要多尝试几次,这是因为网络问题,这个问题推了我很久...)
k8s部署
kubectl apply -f milvus_manifest.yaml
k8s删除部署
kubectl delete  -f milvus_manifest.yaml

3.pulsar manager 配置

    pulsar_manager: true

简单一些配置成nodePort访问

  pulsar_manager:
    service:
      type: NodePort

自建的pulsar manager 账号密码登录不上的问题:

CSRF_TOKEN=$(curl  http://xxxx:31772/pulsar-manager/csrf-token)
curl -H "X-XSRF-TOKEN: $CSRF_TOKEN" -H "Cookie: XSRF-TOKEN=$CSRF_TOKEN;" -H 'Content-Type: application/json' -X PUT http://xxx:31772/pulsar-manager/users/superuser -d '{"name": "pulsar", "password": "pulsar", "description": "test", "email": "username@test.org"}'

执行该命令,就可以pulsar pulsar登录,感谢公司运维!

milvus 建立的pulsar 和 etcd 对接外部s3 value 如下:

## Enable or disable Milvus Cluster mode
cluster:
  enabled: true

## Images must be pullable from every node; pre-pull them when the registry
## is unreachable (see the notes about download failures at the top).
image:
  all:
    repository: milvusdb/milvus
    tag: v2.4.5
    pullPolicy: IfNotPresent
    ## Optionally specify an array of imagePullSecrets.
    ## Secrets must be manually created in the namespace.
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
    ##
    # pullSecrets:
    #   - myRegistryKeySecretName
  tools:
    repository: milvusdb/milvus-config-tool
    tag: v0.1.2
    pullPolicy: IfNotPresent

# Global node selector
# If set, this will apply to all milvus components
# Individual components can be set to a different node selector
nodeSelector: {}

# Global tolerations
# If set, this will apply to all milvus components
# Individual components can be set to a different tolerations
tolerations: []

# Global affinity
# If set, this will apply to all milvus components
# Individual components can be set to a different affinity
affinity: {}

# Global labels and annotations
# If set, this will apply to all milvus components
labels: {}
annotations: {}

# Extra configs for milvus.yaml
# If set, this config will merge into milvus.yaml
# Please follow the config structure in the milvus.yaml
# at https://github.com/milvus-io/milvus/blob/master/configs/milvus.yaml
# Note: this config will be the top priority which will override the config
# in the image and helm chart.
# NOTE: the "#" lines inside user.yaml below are part of the block-scalar
# string value (examples to uncomment), not YAML comments of this file.
extraConfigFiles:
  user.yaml: |+
    #    For example enable rest http for milvus proxy
    #    proxy:
    #      http:
    #        enabled: true
    #      maxUserNum: 100
    #      maxRoleNum: 10
    ##  Enable tlsMode and set the tls cert and key
    #  tls:
    #    serverPemPath: /etc/milvus/certs/tls.crt
    #    serverKeyPath: /etc/milvus/certs/tls.key
    #   common:
    #     security:
    #       tlsMode: 1

## Expose the Milvus service to be accessed from outside the cluster (LoadBalancer service).
## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it.
## ref: http://kubernetes.io/docs/user-guide/services/
##
# NodePort is used here so the gRPC endpoint (19530) is reachable from
# outside the cluster without a cloud load balancer.
service:
  type: NodePort
  port: 19530
  portName: milvus
  nodePort: ""
  annotations: {}
  labels: {}

  ## List of IP addresses at which the Milvus service is available
  ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
  ##
  externalIPs: []
  #   - externalIp1

  # LoadBalancerSourcesRange is a list of allowed CIDR values, which are combined with ServicePort to
  # set allowed inbound rules on the security group assigned to the master load balancer
  loadBalancerSourceRanges:
  - 0.0.0.0/0
  # Optionally assign a known public LB IP
  # loadBalancerIP: 1.2.3.4

ingress:
  enabled: false
  annotations:
    # Annotation example: set nginx ingress type
    # kubernetes.io/ingress.class: nginx
    nginx.ingress.kubernetes.io/backend-protocol: GRPC
    nginx.ingress.kubernetes.io/listen-ports-ssl: '[19530]'
    nginx.ingress.kubernetes.io/proxy-body-size: 4m
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
  labels: {}
  rules:
    - host: "milvus-example.local"
      path: "/"
      pathType: "Prefix"
    # - host: "milvus-example2.local"
    #   path: "/otherpath"
    #   pathType: "Prefix"
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - milvus-example.local

# Service account used by milvus pods; chart defaults to the namespace's
# "default" account when create is false and name is empty.
serviceAccount:
  create: false
  # Explicit empty values instead of bare keys: a bare "key:" parses as null,
  # which yamllint's empty-values rule flags and some tooling mishandles.
  name: ""
  annotations: {}
  labels: {}

# Expose /metrics on each component (port 9091 in the rendered manifests).
metrics:
  enabled: true

  serviceMonitor:
    # Set this to `true` to create ServiceMonitor for Prometheus operator
    enabled: false
    interval: "30s"
    scrapeTimeout: "10s"
    # Additional labels that can be used so ServiceMonitor will be discovered by Prometheus
    additionalLabels: {}

# Probe timings applied to all milvus components.
livenessProbe:
  enabled: true
  initialDelaySeconds: 90
  periodSeconds: 30
  timeoutSeconds: 5
  successThreshold: 1
  failureThreshold: 5

readinessProbe:
  enabled: true
  initialDelaySeconds: 90
  periodSeconds: 10
  timeoutSeconds: 5
  successThreshold: 1
  failureThreshold: 5

log:
  level: "info"
  file:
    maxSize: 300    # MB
    maxAge: 10    # day
    maxBackups: 20
  format: "text"    # text/json

  persistence:
    mountPath: "/milvus/logs"
    ## If true, create/use a Persistent Volume Claim
    ## If false, use emptyDir
    ##
    enabled: false
    annotations:
      helm.sh/resource-policy: keep
    persistentVolumeClaim:
      existingClaim: ""
      ## Milvus Logs Persistent Volume Storage Class
      ## If defined, storageClassName: <storageClass>
      ## If set to "-", storageClassName: "", which disables dynamic provisioning
      ## If undefined (the default) or set to null, no storageClassName spec is
      ##   set, choosing the default provisioner.
      ## ReadWriteMany access mode required for milvus cluster.
      ##
      storageClass:
      accessModes: ReadWriteMany
      size: 10Gi
      subPath: ""

## Heaptrack traces all memory allocations and annotates these events with stack traces.
## See more: https://github.com/KDE/heaptrack
## Enable heaptrack in production is not recommended.
heaptrack:
  image:
    repository: milvusdb/heaptrack
    tag: v0.1.0
    pullPolicy: IfNotPresent

# Settings below only apply when cluster.enabled is false.
standalone:
  replicas: 1  # Run standalone mode with replication disabled
  resources: {}
  # Set local storage size in resources
  # resources:
  #   limits:
  #     ephemeral-storage: 100Gi
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  disk:
    enabled: true
    size:
      enabled: false  # Enable local storage size limit
  profiling:
    enabled: false  # Enable live profiling

  ## Default message queue for milvus standalone
  ## Supported value: rocksmq, natsmq, pulsar and kafka
  messageQueue: rocksmq
  persistence:
    mountPath: "/var/lib/milvus"
    ## If true, milvus will create/use a Persistent Volume Claim
    ## If false, use emptyDir
    ##
    enabled: true
    annotations:
      helm.sh/resource-policy: keep
    persistentVolumeClaim:
      existingClaim: ""
      ## Milvus Persistent Volume Storage Class
      ## If defined, storageClassName: <storageClass>
      ## If set to "-", storageClassName: "", which disables dynamic provisioning
      ## If undefined (the default) or set to null, no storageClassName spec is
      ##   set, choosing the default provisioner.
      ##
      ## NOTE(review): "csi-driver-s3" is a cluster-specific storage class;
      ## it must already exist in the target cluster — verify before use.
      storageClass: "csi-driver-s3"
      accessModes: ReadWriteOnce
      size: 50Gi
      subPath: ""

proxy:
  enabled: true
  # You can set the number of replicas to -1 to remove the replicas field in case you want to use HPA
  replicas: 1
  resources: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: false  # Enable live profiling
  http:
    enabled: true  # whether to enable http rest server
    debugMode:
      enabled: false
  # Mount a TLS secret into proxy pod
  tls:
    enabled: false
## when enabling proxy.tls, all items below should be uncommented and the key and crt values should be populated.
#    enabled: true
#    secretName: milvus-tls
## expecting base64 encoded values here: i.e. $(cat tls.crt | base64 -w 0) and $(cat tls.key | base64 -w 0)
#    key: LS0tLS1CRUdJTiBQU--REDUCT
#    crt: LS0tLS1CRUdJTiBDR--REDUCT
#  volumes:
#  - secret:
#      secretName: milvus-tls
#    name: milvus-tls
#  volumeMounts:
#  - mountPath: /etc/milvus/certs/
#    name: milvus-tls
  # Deployment strategy, default is RollingUpdate
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment
  strategy: {}

rootCoordinator:
  enabled: true
  # You can set the number of replicas greater than 1, only if enable active standby
  replicas: 1  # Run Root Coordinator mode with replication disabled
  resources: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: false  # Enable live profiling
  activeStandby:
    enabled: true  # Enable active-standby when you set multiple replicas for root coordinator
  # Deployment strategy, default is RollingUpdate
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment
  strategy: {}

  service:
    port: 53100
    annotations: {}
    labels: {}
    clusterIP: ""

queryCoordinator:
  enabled: true
  # You can set the number of replicas greater than 1, only if enable active standby
  replicas: 1  # Run Query Coordinator mode with replication disabled
  resources: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: false  # Enable live profiling
  activeStandby:
    enabled: true  # Enable active-standby when you set multiple replicas for query coordinator
  # Deployment strategy, default is RollingUpdate
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment
  strategy: {}

  service:
    port: 19531
    annotations: {}
    labels: {}
    clusterIP: ""

queryNode:
  enabled: true
  # You can set the number of replicas to -1 to remove the replicas field in case you want to use HPA
  replicas: 1
  resources: {}
  # Set local storage size in resources
  # resources:
  #   limits:
  #     ephemeral-storage: 100Gi
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  disk:
    enabled: true  # Enable querynode load disk index, and search on disk index
    size:
      enabled: false  # Enable local storage size limit
  profiling:
    enabled: false  # Enable live profiling
  # Deployment strategy, default is RollingUpdate
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment
  strategy: {}

indexCoordinator:
  enabled: true
  # You can set the number of replicas greater than 1, only if enable active standby
  replicas: 1   # Run Index Coordinator mode with replication disabled
  resources: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: false  # Enable live profiling
  activeStandby:
    enabled: true  # Enable active-standby when you set multiple replicas for index coordinator
  # Deployment strategy, default is RollingUpdate
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment
  strategy: {}

  service:
    port: 31000
    annotations: {}
    labels: {}
    clusterIP: ""

indexNode:
  enabled: true
  # You can set the number of replicas to -1 to remove the replicas field in case you want to use HPA
  replicas: 1
  resources: {}
  # Set local storage size in resources
  # limits:
  #    ephemeral-storage: 100Gi
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: false  # Enable live profiling
  disk:
    enabled: true  # Enable index node build disk vector index
    size:
      enabled: false  # Enable local storage size limit
  # Deployment strategy, default is RollingUpdate
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment
  strategy: {}

dataCoordinator:
  enabled: true
  # You can set the number of replicas greater than 1, only if enable active standby
  replicas: 1           # Run Data Coordinator mode with replication disabled
  resources: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: false  # Enable live profiling
  activeStandby:
    enabled: true  # Enable active-standby when you set multiple replicas for data coordinator
  # Deployment strategy, default is RollingUpdate
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment
  strategy: {}

  service:
    port: 13333
    annotations: {}
    labels: {}
    clusterIP: ""

dataNode:
  enabled: true
  # You can set the number of replicas to -1 to remove the replicas field in case you want to use HPA
  replicas: 1
  resources: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: false  # Enable live profiling
  # Deployment strategy, default is RollingUpdate
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment
  strategy: {}

## mixCoordinator contains all coord
## If you want to use mixcoord, enable this and disable all of other coords
mixCoordinator:
  enabled: false
  # You can set the number of replicas greater than 1, only if enable active standby
  replicas: 1           # Run Mixture Coordinator mode with replication disabled
  resources: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: false  # Enable live profiling
  activeStandby:
    enabled: true  # Enable active-standby when you set multiple replicas for Mixture coordinator
  # Deployment strategy, default is RollingUpdate
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment
  strategy: {}

  service:
    annotations: {}
    labels: {}
    clusterIP: ""

attu:
  enabled: true
  name: attu
  image:
    repository: zilliz/attu
    tag: v2.3.10
    pullPolicy: IfNotPresent
  service:
    annotations: {}
    labels: {}
    type: NodePort
    port: 3000
    # loadBalancerIP: ""
  resources: {}
  podLabels: {}
  ingress:
    enabled: false
    annotations: {}
    # Annotation example: set nginx ingress type
    # kubernetes.io/ingress.class: nginx
    labels: {}
    hosts:
      - milvus-attu.local
    tls: []
    #  - secretName: chart-attu-tls
    #    hosts:
    #      - milvus-attu.local


## Configuration values for the minio dependency
## ref: https://github.com/zilliztech/milvus-helm/blob/master/charts/minio/README.md
##

minio:
  enabled: false
  name: minio
  mode: distributed
  image:
    tag: "RELEASE.2023-03-20T20-16-18Z"
    pullPolicy: IfNotPresent
  accessKey: minioadmin
  secretKey: minioadmin
  existingSecret: ""
  bucketName: "milvus-bucket"
  rootPath: file
  useIAM: false
  iamEndpoint: ""
  region: ""
  useVirtualHost: false
  podDisruptionBudget:
    enabled: false
  resources:
    requests:
      memory: 2Gi

  service:
    type: ClusterIP
    port: 9000

  persistence:
    enabled: true
    existingClaim: ""
    storageClass: "csi-driver-s3"
    accessMode: ReadWriteOnce
    size: 500Gi

  livenessProbe:
    enabled: true
    initialDelaySeconds: 5
    periodSeconds: 5
    timeoutSeconds: 5
    successThreshold: 1
    failureThreshold: 5

  readinessProbe:
    enabled: true
    initialDelaySeconds: 5
    periodSeconds: 5
    timeoutSeconds: 1
    successThreshold: 1
    failureThreshold: 5

  startupProbe:
    enabled: true
    initialDelaySeconds: 0
    periodSeconds: 10
    timeoutSeconds: 5
    successThreshold: 1
    failureThreshold: 60

## Configuration values for the etcd dependency
## ref: https://artifacthub.io/packages/helm/bitnami/etcd
##

etcd:
  enabled: true
  name: etcd
  replicaCount: 3
  pdb:
    create: false
  image:
    repository: "milvusdb/etcd"
    tag: "3.5.5-r4"
    pullPolicy: IfNotPresent

  service:
    type: ClusterIP
    port: 2379
    peerPort: 2380

  auth:
    rbac:
      enabled: false

  persistence:
    enabled: true
    storageClass: "nfs"
    accessMode: ReadWriteOnce
    size: 10Gi

  ## Change default timeout periods to mitigate zombie probe process
  livenessProbe:
    enabled: true
    timeoutSeconds: 10

  readinessProbe:
    enabled: true
    periodSeconds: 20
    timeoutSeconds: 10

  ## Enable auto compaction
  ## compaction by every 1000 revision
  ##
  autoCompactionMode: revision
  autoCompactionRetention: "1000"

  ## Increase default quota to 4G
  ##
  extraEnvVars:
  - name: ETCD_QUOTA_BACKEND_BYTES
    value: "4294967296"
  - name: ETCD_HEARTBEAT_INTERVAL
    value: "500"
  - name: ETCD_ELECTION_TIMEOUT
    value: "2500"

## Configuration values for the pulsar dependency
## ref: https://github.com/apache/pulsar-helm-chart
##

pulsar:
  enabled: true
  name: pulsar

  fullnameOverride: ""
  persistence: true

  maxMessageSize: "5242880"  # 5 * 1024 * 1024 Bytes, Maximum size of each message in pulsar.

  rbac:
    enabled: false
    psp: false
    limit_to_namespace: true

  affinity:
    anti_affinity: false

## enableAntiAffinity: no

  components:
    zookeeper: true
    bookkeeper: true
    # bookkeeper - autorecovery
    autorecovery: true
    broker: true
    functions: false
    proxy: true
    toolset: false
    pulsar_manager: true

  monitoring:
    prometheus: false
    grafana: false
    node_exporter: false
    alert_manager: false

  images:
    broker:
      repository: apachepulsar/pulsar
      pullPolicy: IfNotPresent
      tag: 2.8.2
    autorecovery:
      repository: apachepulsar/pulsar
      tag: 2.8.2
      pullPolicy: IfNotPresent
    zookeeper:
      repository: apachepulsar/pulsar
      pullPolicy: IfNotPresent
      tag: 2.8.2
    bookie:
      repository: apachepulsar/pulsar
      pullPolicy: IfNotPresent
      tag: 2.8.2
    proxy:
      repository: apachepulsar/pulsar
      pullPolicy: IfNotPresent
      tag: 2.8.2
    pulsar_manager:
      repository: apachepulsar/pulsar-manager
      pullPolicy: IfNotPresent
      tag: v0.2.0

  zookeeper:
    volumes:
      data:
        storageClassName: "nfs"
    resources:
      requests:
        memory: 1024Mi
        cpu: 0.3
    configData:
      PULSAR_MEM: >
        -Xms1024m
        -Xmx1024m
      PULSAR_GC: >
         -Dcom.sun.management.jmxremote
         -Djute.maxbuffer=10485760
         -XX:+ParallelRefProcEnabled
         -XX:+UnlockExperimentalVMOptions
         -XX:+DoEscapeAnalysis
         -XX:+DisableExplicitGC
         -XX:+PerfDisableSharedMem
         -Dzookeeper.forceSync=no
    pdb:
      usePolicy: false

  bookkeeper:
    replicaCount: 3
    volumes:
      journal:
#        name: journal
#        size: 100Gi
        storageClassName: "nfs"

      ledgers:
#        name: ledgers
#        size: 200Gi
        storageClassName: "nfs"
    resources:
      requests:
        memory: 2048Mi
        cpu: 1
    configData:
      PULSAR_MEM: >
        -Xms4096m
        -Xmx4096m
        -XX:MaxDirectMemorySize=8192m
      PULSAR_GC: >
        -Dio.netty.leakDetectionLevel=disabled
        -Dio.netty.recycler.linkCapacity=1024
        -XX:+UseG1GC -XX:MaxGCPauseMillis=10
        -XX:+ParallelRefProcEnabled
        -XX:+UnlockExperimentalVMOptions
        -XX:+DoEscapeAnalysis
        -XX:ParallelGCThreads=32
        -XX:ConcGCThreads=32
        -XX:G1NewSizePercent=50
        -XX:+DisableExplicitGC
        -XX:-ResizePLAB
        -XX:+ExitOnOutOfMemoryError
        -XX:+PerfDisableSharedMem
        -XX:+PrintGCDetails
      nettyMaxFrameSizeBytes: "104867840"
    pdb:
      usePolicy: false

  broker:
    component: broker
    podMonitor:
      enabled: false
    replicaCount: 1
    resources:
      requests:
        memory: 4096Mi
        cpu: 1.5
    configData:
      PULSAR_MEM: >
        -Xms4096m
        -Xmx4096m
        -XX:MaxDirectMemorySize=8192m
      PULSAR_GC: >
        -Dio.netty.leakDetectionLevel=disabled
        -Dio.netty.recycler.linkCapacity=1024
        -XX:+ParallelRefProcEnabled
        -XX:+UnlockExperimentalVMOptions
        -XX:+DoEscapeAnalysis
        -XX:ParallelGCThreads=32
        -XX:ConcGCThreads=32
        -XX:G1NewSizePercent=50
        -XX:+DisableExplicitGC
        -XX:-ResizePLAB
        -XX:+ExitOnOutOfMemoryError
      maxMessageSize: "104857600"
      defaultRetentionTimeInMinutes: "10080"
      defaultRetentionSizeInMB: "-1"
      backlogQuotaDefaultLimitGB: "8"
      ttlDurationDefaultInSeconds: "259200"
      subscriptionExpirationTimeMinutes: "3"
      backlogQuotaDefaultRetentionPolicy: producer_exception
    pdb:
      usePolicy: false

  autorecovery:
    resources:
      requests:
        memory: 512Mi
        cpu: 1

  proxy:
    replicaCount: 1
    podMonitor:
      enabled: false
    resources:
      requests:
        memory: 2048Mi
        cpu: 1
    service:
      type: ClusterIP
    ports:
      pulsar: 6650
    configData:
      PULSAR_MEM: >
        -Xms2048m -Xmx2048m
      PULSAR_GC: >
        -XX:MaxDirectMemorySize=2048m
      httpNumThreads: "100"
    pdb:
      usePolicy: false

  pulsar_manager:
    service:
      type: ClusterIP

  pulsar_metadata:
    component: pulsar-init
    image:
      # the image used for running `pulsar-cluster-initialize` job
      repository: apachepulsar/pulsar
      tag: 2.8.2


## Configuration values for the kafka dependency
## ref: https://artifacthub.io/packages/helm/bitnami/kafka
##

kafka:
  enabled: false
  name: kafka
  replicaCount: 3
  image:
    repository: bitnami/kafka
    tag: 3.1.0-debian-10-r52
  ## Increase graceful termination for kafka graceful shutdown
  terminationGracePeriodSeconds: "90"
  pdb:
    create: false

  ## Enable startup probe to prevent pod restart during recovering
  startupProbe:
    enabled: true

  ## Kafka Java Heap size
  heapOpts: "-Xmx4096m -Xms4096m"
  maxMessageBytes: _10485760
  defaultReplicationFactor: 3
  offsetsTopicReplicationFactor: 3
  ## Only enable time based log retention
  logRetentionHours: 168
  logRetentionBytes: _-1
  extraEnvVars:
  - name: KAFKA_CFG_MAX_PARTITION_FETCH_BYTES
    value: "5242880"
  - name: KAFKA_CFG_MAX_REQUEST_SIZE
    value: "5242880"
  - name: KAFKA_CFG_REPLICA_FETCH_MAX_BYTES
    value: "10485760"
  - name: KAFKA_CFG_FETCH_MESSAGE_MAX_BYTES
    value: "5242880"
  - name: KAFKA_CFG_LOG_ROLL_HOURS
    value: "24"

  persistence:
    enabled: true
    storageClass:
    accessMode: ReadWriteOnce
    size: 300Gi

  metrics:
    ## Prometheus Kafka exporter: exposes complimentary metrics to JMX exporter
    kafka:
      enabled: false
      image:
        repository: bitnami/kafka-exporter
        tag: 1.4.2-debian-10-r182

    ## Prometheus JMX exporter: exposes the majority of Kafkas metrics
    jmx:
      enabled: false
      image:
        repository: bitnami/jmx-exporter
        tag: 0.16.1-debian-10-r245

    ## To enable serviceMonitor, you must enable either kafka exporter or jmx exporter.
    ## And you can enable them both
    serviceMonitor:
      enabled: false

  service:
    type: ClusterIP
    ports:
      client: 9092

  zookeeper:
    enabled: true
    replicaCount: 3

###################################
# External S3
# - these configs are only used when `externalS3.enabled` is true
###################################
externalS3:
  enabled: true
  host: "xxx"
  port: "9000"
  accessKey: "xxx"
  secretKey: "xx"
  useSSL: false
  bucketName: "xxx"
  rootPath: "xxx"
  useIAM: false
  cloudProvider: "aws"
  iamEndpoint: ""
  region: ""
  useVirtualHost: false

###################################
# GCS Gateway
# - these configs are only used when `minio.gcsgateway.enabled` is true
###################################
externalGcs:
  bucketName: ""

###################################
# External etcd
# - these configs are only used when `externalEtcd.enabled` is true
###################################
externalEtcd:
  enabled: false

  ## the endpoints of the external etcd
  ##
  endpoints:
    - xxx:23790

###################################
# External pulsar
# - these configs are only used when `externalPulsar.enabled` is true
###################################
externalPulsar:
  enabled: false
  host: "xxx"
  port: 30012
  maxMessageSize: "5242880"  # 5 * 1024 * 1024 Bytes, Maximum size of each message in pulsar.
  tenant: "xx"
  namespace: "x"
  authPlugin: "org.apache.pulsar.client.impl.auth.AuthenticationToken"
  authParams: 'token:"xx"'

###################################
# External kafka
# - these configs are only used when `externalKafka.enabled` is true
# - note that the following are just examples, you should confirm the
#   value of brokerList and mechanisms according to the actual external
#   Kafka configuration. E.g. If you select the AWS MSK, the configuration
#   should look something like this:
#   externalKafka:
#     enabled: true
#     brokerList: "xxxx:9096"
#     securityProtocol: SASL_SSL
#     sasl:
#       mechanisms: SCRAM-SHA-512
#       password: "xxx"
#       username: "xxx"
###################################
externalKafka:
  enabled: false
  brokerList: localhost:9092
  securityProtocol: SASL_SSL
  sasl:
    mechanisms: PLAIN
    username: ""
    password: ""

---
# Source: milvus/charts/pulsar/templates/broker-service-account.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
apiVersion: v1
kind: ServiceAccount
metadata:
  name: "my-release-pulsar-broker-acct"
  namespace: default
  labels:
    app: pulsar
    chart: pulsar-2.7.8
    release: my-release
    heritage: Helm
    cluster: my-release-pulsar
    component: broker
  annotations: {}
---
# Source: milvus/charts/pulsar/templates/pulsar-manager-admin-secret.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
apiVersion: v1
kind: Secret
metadata:
  name: "my-release-pulsar-pulsar-manager-secret"
  namespace: default
  labels:
    app: pulsar
    chart: pulsar-2.7.8
    release: my-release
    heritage: Helm
    component: pulsar-manager
    cluster: my-release-pulsar
type: Opaque
data:
  PULSAR_MANAGER_ADMIN_PASSWORD: cHVsc2Fy
  PULSAR_MANAGER_ADMIN_USER: cHVsc2Fy
---
# Source: milvus/charts/pulsar/templates/autorecovery-configmap.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
apiVersion: v1
kind: ConfigMap
metadata:
  name: "my-release-pulsar-recovery"
  namespace: default
  labels:
    app: pulsar
    chart: pulsar-2.7.8
    release: my-release
    heritage: Helm
    cluster: my-release-pulsar
    component: recovery
data:
  # common config
  zkServers: "my-release-pulsar-zookeeper:2181"
  zkLedgersRootPath: "/ledgers"
  # enable bookkeeper http server
  httpServerEnabled: "true"
  httpServerPort: "8000"
  # config the stats provider
  statsProviderClass: org.apache.bookkeeper.stats.prometheus.PrometheusMetricsProvider
  # use hostname as the bookie id
  useHostNameAsBookieID: "true"
  BOOKIE_MEM: |
    -Xms64m -Xmx64m
---
# Source: milvus/charts/pulsar/templates/bookkeeper-configmap.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
apiVersion: v1
kind: ConfigMap
metadata:
  name: "my-release-pulsar-bookie"
  namespace: default
  labels:
    app: pulsar
    chart: pulsar-2.7.8
    release: my-release
    heritage: Helm
    cluster: my-release-pulsar
    component: bookie
data:
  # common config
  zkServers: "my-release-pulsar-zookeeper:2181"
  zkLedgersRootPath: "/ledgers"
  # enable bookkeeper http server
  httpServerEnabled: "true"
  httpServerPort: "8000"
  # config the stats provider
  statsProviderClass: org.apache.bookkeeper.stats.prometheus.PrometheusMetricsProvider
  # use hostname as the bookie id
  useHostNameAsBookieID: "true"
  # disable auto recovery on bookies since we will start AutoRecovery in separated pods
  autoRecoveryDaemonEnabled: "false"
  # Do not retain journal files as it increase the disk utilization
  journalMaxBackups: "0"
  journalDirectories: "/pulsar/data/bookkeeper/journal"
  PULSAR_PREFIX_journalDirectories: "/pulsar/data/bookkeeper/journal"
  ledgerDirectories: "/pulsar/data/bookkeeper/ledgers"
  # TLS config
  
  PULSAR_GC: |
    -Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+UseG1GC -XX:MaxGCPauseMillis=10 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError -XX:+PerfDisableSharedMem -XX:+PrintGCDetails
  PULSAR_MEM: |
    -Xms4096m -Xmx4096m -XX:MaxDirectMemorySize=8192m
  dbStorage_readAheadCacheMaxSizeMb: "32"
  dbStorage_rocksDB_blockCacheSize: "8388608"
  dbStorage_rocksDB_writeBufferSizeMB: "8"
  dbStorage_writeCacheMaxSizeMb: "32"
  nettyMaxFrameSizeBytes: "104867840"
---
# Source: milvus/charts/pulsar/templates/broker-configmap.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
apiVersion: v1
kind: ConfigMap
metadata:
  name: "my-release-pulsar-broker"
  namespace: default
  labels:
    app: pulsar
    chart: pulsar-2.7.8
    release: my-release
    heritage: Helm
    cluster: my-release-pulsar
    component: broker
data:
  # Metadata settings
  zookeeperServers: "my-release-pulsar-zookeeper:2181"
  configurationStoreServers: "my-release-pulsar-zookeeper:2181"

  # Broker settings
  clusterName: my-release-pulsar
  exposeTopicLevelMetricsInPrometheus: "true"
  numHttpServerThreads: "8"
  zooKeeperSessionTimeoutMillis: "30000"
  statusFilePath: "/pulsar/status"

  # Function Worker Settings
  # function worker configuration
  functionsWorkerEnabled: "false"

  # prometheus needs to access /metrics endpoint
  webServicePort: "8080"
  brokerServicePort: "6650"

  # Authentication Settings
  PULSAR_GC: |
    -Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError
  PULSAR_MEM: |
    -Xms4096m -Xmx4096m -XX:MaxDirectMemorySize=8192m
  backlogQuotaDefaultLimitGB: "8"
  backlogQuotaDefaultRetentionPolicy: producer_exception
  defaultRetentionSizeInMB: "-1"
  defaultRetentionTimeInMinutes: "10080"
  managedLedgerDefaultAckQuorum: "2"
  managedLedgerDefaultEnsembleSize: "2"
  managedLedgerDefaultWriteQuorum: "2"
  maxMessageSize: "104857600"
  subscriptionExpirationTimeMinutes: "3"
  ttlDurationDefaultInSeconds: "259200"
---
# Source: milvus/charts/pulsar/templates/proxy-configmap.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
apiVersion: v1
kind: ConfigMap
metadata:
  name: "my-release-pulsar-proxy"
  namespace: default
  labels:
    app: pulsar
    chart: pulsar-2.7.8
    release: my-release
    heritage: Helm
    cluster: my-release-pulsar
    component: proxy
data:
  clusterName: my-release-pulsar
  # duplicate httpNumThreads entry removed — the effective value "100" is set
  # below; duplicate YAML keys are invalid and silently last-wins on most parsers
  statusFilePath: "/pulsar/status"
  # prometheus needs to access /metrics endpoint
  webServicePort: "80"
  servicePort: "6650"
  brokerServiceURL: pulsar://my-release-pulsar-broker:6650
  brokerWebServiceURL: http://my-release-pulsar-broker:8080

  # Authentication Settings
  PULSAR_GC: |
    -XX:MaxDirectMemorySize=2048m
  PULSAR_MEM: |
    -Xms2048m -Xmx2048m
  httpNumThreads: "100"
---
# Source: milvus/charts/pulsar/templates/pulsar-manager-configmap.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
apiVersion: v1
kind: ConfigMap
metadata:
  name: "my-release-pulsar-pulsar-manager"
  namespace: default
  labels:
    app: pulsar
    chart: pulsar-2.7.8
    release: my-release
    heritage: Helm
    cluster: my-release-pulsar
    component: pulsar-manager
data:
  DRIVER_CLASS_NAME: org.postgresql.Driver
  LOG_LEVEL: DEBUG
  REDIRECT_HOST: http://127.0.0.1
  REDIRECT_PORT: "9527"
  URL: jdbc:postgresql://127.0.0.1:5432/pulsar_manager
---
# Source: milvus/charts/pulsar/templates/zookeeper-configmap.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#

# deploy zookeeper only when `components.zookeeper` is true
apiVersion: v1
kind: ConfigMap
metadata:
  name: "my-release-pulsar-zookeeper"
  namespace: default
  labels:
    app: pulsar
    chart: pulsar-2.7.8
    release: my-release
    heritage: Helm
    cluster: my-release-pulsar
    component: zookeeper
data:
  dataDir: /pulsar/data/zookeeper
  PULSAR_PREFIX_serverCnxnFactory: org.apache.zookeeper.server.NIOServerCnxnFactory
  serverCnxnFactory: org.apache.zookeeper.server.NIOServerCnxnFactory
  PULSAR_GC: |
    -Dcom.sun.management.jmxremote -Djute.maxbuffer=10485760 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:+DisableExplicitGC -XX:+PerfDisableSharedMem -Dzookeeper.forceSync=no
  PULSAR_MEM: |
    -Xms1024m -Xmx1024m
---
# Source: milvus/templates/configmap.yaml
# Milvus configuration. default.yaml points every component at the external
# etcd, object storage (S3-compatible) and Pulsar endpoints; user.yaml is the
# (currently all-commented) override file merged on top at pod start.
apiVersion: v1
kind: ConfigMap
metadata:
  name: my-release-milvus
data:
  default.yaml: |+
    # Copyright (C) 2019-2021 Zilliz. All rights reserved.
    #
    # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
    # with the License. You may obtain a copy of the License at
    #
    # http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software distributed under the License
    # is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
    # or implied. See the License for the specific language governing permissions and limitations under the License.

    etcd:
      endpoints:
        - my-release-etcd:2379

    metastore:
      type: etcd

    minio:
      address: obs.hw.s3.com.cn
      port: 5080
      # SECURITY(review): plaintext object-storage credentials in a ConfigMap
      # are readable by anyone with ConfigMap access in this namespace; move
      # them to a Secret and rotate these keys (they are now committed in the
      # clear).
      accessKeyID: BE8D0CE99ABDFE159CAB
      secretAccessKey: 7g6Hy2A0H3CT10eYt99lqfJ0gxUAAAGQmr3+FYeA
      useSSL: false
      bucketName: ksz-kp-test
      rootPath: /test_0729
      useIAM: false
      cloudProvider: aws
      iamEndpoint: ""  # explicit empty string (was a bare value with trailing whitespace)

    messageQueue: pulsar

    pulsar:
      address: my-release-pulsar-proxy
      port: 6650
      maxMessageSize: 5242880

    rootCoord:
      address: my-release-milvus-rootcoord
      port: 53100
      enableActiveStandby: true  # Enable active-standby

    proxy:
      port: 19530
      internalPort: 19529

    queryCoord:
      address: my-release-milvus-querycoord
      port: 19531
      enableActiveStandby: true  # Enable active-standby

    queryNode:
      port: 21123
      enableDisk: true # Enable querynode load disk index, and search on disk index

    indexCoord:
      address: my-release-milvus-indexcoord
      port: 31000
      enableActiveStandby: true  # Enable active-standby

    indexNode:
      port: 21121
      enableDisk: true # Enable index node build disk vector index

    dataCoord:
      address: my-release-milvus-datacoord
      port: 13333
      enableActiveStandby: true  # Enable active-standby

    dataNode:
      port: 21124

    log:
      level: info
      file:
        rootPath: ""
        maxSize: 300
        maxAge: 10
        maxBackups: 20
      format: text
  user.yaml: |-
    #    For example enable rest http for milvus proxy
    #    proxy:
    #      http:
    #        enabled: true
    #      maxUserNum: 100
    #      maxRoleNum: 10
    ##  Enable tlsMode and set the tls cert and key
    #  tls:
    #    serverPemPath: /etc/milvus/certs/tls.crt
    #    serverKeyPath: /etc/milvus/certs/tls.key
    #   common:
    #     security:
    #       tlsMode: 1
---
# Source: milvus/charts/pulsar/templates/broker-cluster-role-binding.yaml
# Namespaced Role for the Pulsar broker: read access to ConfigMaps plus broad
# CRUD on core/apps resources.
# NOTE(review): create/delete/patch on secrets, deployments and statefulsets
# is a wide grant — presumably required by Pulsar functions; consider
# narrowing if that feature is unused.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: "my-release-pulsar-broker-role"
  labels:
    app: pulsar
    chart: pulsar-2.7.8
    release: my-release
    heritage: Helm
    cluster: my-release-pulsar
rules:
- apiGroups: [""]
  resources:
  - configmaps
  verbs: ["get", "list", "watch"]
- apiGroups: ["", "extensions", "apps"]
  resources:
    - pods
    - services
    - deployments
    - secrets
    - statefulsets
  verbs:
    - list
    - watch
    - get
    - update
    - create
    - delete
    - patch
---
# Source: milvus/charts/pulsar/templates/broker-cluster-role-binding.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
## TODO create our own cluster role with less privledges than admin
# Binds the broker Role above to the broker's ServiceAccount in this namespace.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: "my-release-pulsar-broker-rolebinding"
  labels:
    app: pulsar
    chart: pulsar-2.7.8
    release: my-release
    heritage: Helm
    cluster: my-release-pulsar
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: "my-release-pulsar-broker-role"
subjects:
- kind: ServiceAccount
  name: "my-release-pulsar-broker-acct"
  namespace: default
---
# Source: milvus/charts/etcd/templates/svc-headless.yaml
# Headless Service (clusterIP: None) for etcd; publishNotReadyAddresses makes
# pods resolvable before they pass readiness, as needed during cluster
# bootstrap.
apiVersion: v1
kind: Service
metadata:
  name: my-release-etcd-headless
  namespace: default
  labels:
    app.kubernetes.io/name: etcd
    helm.sh/chart: etcd-6.3.3
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/managed-by: Helm
  annotations:
    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
  type: ClusterIP
  clusterIP: None
  publishNotReadyAddresses: true
  ports:
    - name: "client"
      port: 2379
      targetPort: client
    - name: "peer"
      port: 2380
      targetPort: peer
  selector:
    app.kubernetes.io/name: etcd
    app.kubernetes.io/instance: my-release
---
# Source: milvus/charts/etcd/templates/svc.yaml
# Client-facing ClusterIP Service for etcd; the Milvus config addresses it as
# my-release-etcd:2379.
apiVersion: v1
kind: Service
metadata:
  name: my-release-etcd
  namespace: default
  labels:
    app.kubernetes.io/name: etcd
    helm.sh/chart: etcd-6.3.3
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/managed-by: Helm
  # Explicit empty map instead of a bare (null) `annotations:` key.
  annotations: {}
spec:
  type: ClusterIP
  ports:
    # `nodePort: null` template artifacts removed — nodePort is meaningless on
    # a ClusterIP service and null values are pruned by the API server anyway.
    - name: "client"
      port: 2379
      targetPort: client
    - name: "peer"
      port: 2380
      targetPort: peer
  selector:
    app.kubernetes.io/name: etcd
    app.kubernetes.io/instance: my-release
---
# Source: milvus/charts/pulsar/templates/autorecovery-service.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Headless Service (clusterIP: None) exposing the BookKeeper autorecovery
# pods' http port (8000).
apiVersion: v1
kind: Service
metadata:
  name: "my-release-pulsar-recovery"
  namespace: default
  labels:
    app: pulsar
    chart: pulsar-2.7.8
    release: my-release
    heritage: Helm
    cluster: my-release-pulsar
    component: recovery
spec:
  ports:
  - name: http
    port: 8000
  clusterIP: None
  selector:
    app: pulsar
    release: my-release
    component: recovery
---
# Source: milvus/charts/pulsar/templates/bookkeeper-service.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Headless Service for the BookKeeper bookies (bookie protocol 3181, http
# 8000); not-ready addresses are published so bookies are resolvable during
# startup.
apiVersion: v1
kind: Service
metadata:
  name: "my-release-pulsar-bookie"
  namespace: default
  labels:
    app: pulsar
    chart: pulsar-2.7.8
    release: my-release
    heritage: Helm
    cluster: my-release-pulsar
    component: bookie
spec:
  ports:
  - name: "bookie"
    port: 3181
  - name: http
    port: 8000
  clusterIP: None
  selector:
    app: pulsar
    release: my-release
    component: bookie
  publishNotReadyAddresses: true
---
# Source: milvus/charts/pulsar/templates/broker-service.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Headless Service for the Pulsar brokers: http admin/metrics on 8080, binary
# protocol on 6650.
apiVersion: v1
kind: Service
metadata:
  name: "my-release-pulsar-broker"
  namespace: default
  labels:
    app: pulsar
    chart: pulsar-2.7.8
    release: my-release
    heritage: Helm
    cluster: my-release-pulsar
    component: broker
  annotations:
    {}
spec:
  ports:
  # prometheus needs to access /metrics endpoint
  - name: http
    port: 8080
  - name: "pulsar"
    port: 6650
  clusterIP: None
  selector:
    app: pulsar
    release: my-release
    component: broker
---
# Source: milvus/charts/pulsar/templates/proxy-service.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ClusterIP Service for the Pulsar proxy; Milvus connects here
# (my-release-pulsar-proxy:6650 per the milvus ConfigMap).
apiVersion: v1
kind: Service
metadata:
  name: "my-release-pulsar-proxy"
  namespace: default
  labels:
    app: pulsar
    chart: pulsar-2.7.8
    release: my-release
    heritage: Helm
    cluster: my-release-pulsar
    component: proxy
  # Explicit empty map instead of a bare (null) `annotations:` key.
  annotations: {}
spec:
  type: ClusterIP
  ports:
    - name: http
      port: 80
      protocol: TCP
    - name: "pulsar"
      port: 6650
      protocol: TCP
  selector:
    app: pulsar
    release: my-release
    component: proxy
---
# Source: milvus/charts/pulsar/templates/pulsar-manager-service.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ClusterIP Service for the pulsar-manager UI/server on port 9527.
apiVersion: v1
kind: Service
metadata:
  name: "my-release-pulsar-pulsar-manager"
  namespace: default
  labels:
    app: pulsar
    chart: pulsar-2.7.8
    release: my-release
    heritage: Helm
    cluster: my-release-pulsar
    component: pulsar-manager
  annotations:
    {}
spec:
  type: ClusterIP
  ports:
    - name: server
      port: 9527
      targetPort: 9527
      protocol: TCP
  selector:
    app: pulsar
    release: my-release
    component: pulsar-manager
---
# Source: milvus/charts/pulsar/templates/zookeeper-service.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#

# deploy zookeeper only when `components.zookeeper` is true
# Headless Service for the Pulsar ZooKeeper ensemble: metrics (8000), quorum
# follower (2888), leader election (3888) and client (2181) ports; unready
# endpoints are published for ensemble bootstrap.
apiVersion: v1
kind: Service
metadata:
  name: "my-release-pulsar-zookeeper"
  namespace: default
  labels:
    app: pulsar
    chart: pulsar-2.7.8
    release: my-release
    heritage: Helm
    cluster: my-release-pulsar
    component: zookeeper
  annotations:
    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
  ports:
    # prometheus needs to access /metrics endpoint
    - name: http
      port: 8000
    - name: "follower"
      port: 2888
    - name: "leader-election"
      port: 3888
    - name: "client"
      port: 2181
  clusterIP: None
  selector:
    app: pulsar
    release: my-release
    component: zookeeper
---
# Source: milvus/templates/attu-svc.yaml
# NodePort Service for the Attu web UI (port 3000) — exposed on every node's
# allocated NodePort; restrict externally if the cluster is reachable from
# untrusted networks.
# NOTE(review): app.kubernetes.io/version says "2.2.13" while the images run
# v2.4.5 — chart/appVersion label looks stale; confirm.
apiVersion: v1
kind: Service
metadata:
  name: my-release-milvus-attu
  labels:
    helm.sh/chart: milvus-4.0.31
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.2.13"
    app.kubernetes.io/managed-by: Helm
    component: "attu"
spec:
  type: NodePort
  ports:
    - name: attu
      protocol: TCP
      port: 3000
      targetPort: 3000
  selector:
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    component: "attu"
---
# Source: milvus/templates/datacoord-svc.yaml
# ClusterIP Service for the data coordinator: gRPC on 13333 (matches
# dataCoord.port in the ConfigMap) plus /healthz-serving metrics on 9091.
apiVersion: v1
kind: Service
metadata:
  name: my-release-milvus-datacoord
  labels:
    helm.sh/chart: milvus-4.0.31
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.2.13"
    app.kubernetes.io/managed-by: Helm
    component: "datacoord"
spec:
  type: ClusterIP
  ports:
    - name: datacoord
      port: 13333
      protocol: TCP
      targetPort: datacoord
    - name: metrics
      protocol: TCP
      port: 9091
      targetPort: metrics
  selector:
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    component: "datacoord"
---
# Source: milvus/templates/datanode-svc.yaml
# Headless Service for data nodes — exposes only the metrics port (9091);
# worker nodes need per-pod addressing rather than load balancing.
apiVersion: v1
kind: Service
metadata:
  name: my-release-milvus-datanode
  labels:
    helm.sh/chart: milvus-4.0.31
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.2.13"
    app.kubernetes.io/managed-by: Helm
    component: "datanode"
spec:
  type: ClusterIP
  clusterIP: None
  ports:
    - name: metrics
      protocol: TCP
      port: 9091
      targetPort: metrics
  selector:
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    component: "datanode"
---
# Source: milvus/templates/indexcoord-svc.yaml
# ClusterIP Service for the index coordinator: gRPC on 31000 (matches
# indexCoord.port in the ConfigMap) plus metrics on 9091.
apiVersion: v1
kind: Service
metadata:
  name: my-release-milvus-indexcoord
  labels:
    helm.sh/chart: milvus-4.0.31
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.2.13"
    app.kubernetes.io/managed-by: Helm
    component: "indexcoord"
spec:
  type: ClusterIP
  ports:
    - name: indexcoord
      port: 31000
      protocol: TCP
      targetPort: indexcoord
    - name: metrics
      protocol: TCP
      port: 9091
      targetPort: metrics
  selector:
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    component: "indexcoord"
---
# Source: milvus/templates/indexnode-svc.yaml
# Headless Service for index nodes — metrics port (9091) only.
apiVersion: v1
kind: Service
metadata:
  name: my-release-milvus-indexnode
  labels:
    helm.sh/chart: milvus-4.0.31
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.2.13"
    app.kubernetes.io/managed-by: Helm
    component: "indexnode"
spec:
  type: ClusterIP
  clusterIP: None
  ports:
    - name: metrics
      protocol: TCP
      port: 9091
      targetPort: metrics
  selector:
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    component: "indexnode"
---
# Source: milvus/templates/querycoord-svc.yaml
# ClusterIP Service for the query coordinator: gRPC on 19531 (matches
# queryCoord.port in the ConfigMap) plus metrics on 9091.
apiVersion: v1
kind: Service
metadata:
  name: my-release-milvus-querycoord
  labels:
    helm.sh/chart: milvus-4.0.31
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.2.13"
    app.kubernetes.io/managed-by: Helm
    component: "querycoord"
spec:
  type: ClusterIP
  ports:
    - name: querycoord
      port: 19531
      protocol: TCP
      targetPort: querycoord
    - name: metrics
      protocol: TCP
      port: 9091
      targetPort: metrics
  selector:
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    component: "querycoord"
---
# Source: milvus/templates/querynode-svc.yaml
# Headless Service for query nodes — metrics port (9091) only.
apiVersion: v1
kind: Service
metadata:
  name: my-release-milvus-querynode
  labels:
    helm.sh/chart: milvus-4.0.31
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.2.13"
    app.kubernetes.io/managed-by: Helm
    component: "querynode"
spec:
  type: ClusterIP
  clusterIP: None
  ports:
    - name: metrics
      protocol: TCP
      port: 9091
      targetPort: metrics
  selector:
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    component: "querynode"
---
# Source: milvus/templates/rootcoord-svc.yaml
# ClusterIP Service for the root coordinator: gRPC on 53100 (matches
# rootCoord.port in the ConfigMap) plus metrics on 9091.
apiVersion: v1
kind: Service
metadata:
  name: my-release-milvus-rootcoord
  labels:
    helm.sh/chart: milvus-4.0.31
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.2.13"
    app.kubernetes.io/managed-by: Helm
    component: "rootcoord"
spec:
  type: ClusterIP
  ports:
    - name: rootcoord
      port: 53100
      protocol: TCP
      targetPort: rootcoord
    - name: metrics
      protocol: TCP
      port: 9091
      targetPort: metrics
  selector:
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    component: "rootcoord"
---
# Source: milvus/templates/service.yaml
# Client entry point: NodePort Service on the Milvus proxy (gRPC 19530 and
# metrics 9091). NodePort exposes Milvus on every node; restrict externally
# if the cluster is reachable from untrusted networks.
apiVersion: v1
kind: Service
metadata:
  name: my-release-milvus
  labels:
    helm.sh/chart: milvus-4.0.31
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.2.13"
    app.kubernetes.io/managed-by: Helm
    component: "proxy"
spec:
  type: NodePort
  ports:
    - name: milvus
      port: 19530
      protocol: TCP
      targetPort: milvus
    - name: metrics
      protocol: TCP
      port: 9091
      targetPort: metrics
  selector:
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    component: "proxy"
---
# Source: milvus/charts/pulsar/templates/pulsar-manager-deployment.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Single-replica pulsar-manager UI. Admin credentials come from the
# "...-pulsar-manager-secret" Secret (good — not inlined); /data is an
# emptyDir, so manager state is lost whenever the pod is rescheduled.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: "my-release-pulsar-pulsar-manager"
  namespace: default
  labels:
    app: pulsar
    chart: pulsar-2.7.8
    release: my-release
    heritage: Helm
    cluster: my-release-pulsar
    component: pulsar-manager
spec:
  replicas: 1
  selector:
    matchLabels:
      app: pulsar
      release: my-release
      component: pulsar-manager
  template:
    metadata:
      labels:
        app: pulsar
        release: my-release
        cluster: my-release-pulsar
        component: pulsar-manager
      annotations:
        {}
    spec:
      terminationGracePeriodSeconds: 30
      containers:
        - name: "my-release-pulsar-pulsar-manager"
          image: "apachepulsar/pulsar-manager:v0.2.0"
          imagePullPolicy: IfNotPresent
          resources:
            requests:
              cpu: 0.1
              memory: 250Mi
          ports:
          - containerPort: 9527
          volumeMounts:
          - name: pulsar-manager-data
            mountPath: /data
          envFrom:
          - configMapRef:
              name: "my-release-pulsar-pulsar-manager"
          env:
          - name: PULSAR_CLUSTER
            value: my-release-pulsar
          - name: USERNAME
            valueFrom:
              secretKeyRef:
                key: PULSAR_MANAGER_ADMIN_USER
                name: "my-release-pulsar-pulsar-manager-secret"
          - name: PASSWORD
            valueFrom:
              secretKeyRef:
                key: PULSAR_MANAGER_ADMIN_PASSWORD
                name: "my-release-pulsar-pulsar-manager-secret"

      volumes:
        - name: pulsar-manager-data
          emptyDir: {}
---
# Source: milvus/templates/attu-deployment.yaml
# Attu admin UI, pointed at the milvus proxy Service.
# NOTE(review): MILVUS_URL uses an http:// scheme against the gRPC port
# 19530 — Attu appears to tolerate this form, but confirm against the Attu
# docs for v2.3.10.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-release-milvus-attu
  labels:
    helm.sh/chart: milvus-4.0.31
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.2.13"
    app.kubernetes.io/managed-by: Helm
    component: "attu"
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: milvus
      app.kubernetes.io/instance: my-release
      component: "attu"
  template:
    metadata:
      labels:
        app.kubernetes.io/name: milvus
        app.kubernetes.io/instance: my-release
        component: "attu"
    spec:
      containers:
      - name: attu
        image: zilliz/attu:v2.3.10
        imagePullPolicy: IfNotPresent
        ports:
        - name: attu
          containerPort: 3000
          protocol: TCP
        env:
        - name: MILVUS_URL
          value: http://my-release-milvus:19530
        resources:
          {}
---
# Source: milvus/templates/datacoord-deployment.yaml
# Milvus data coordinator. strategy Recreate ensures the old pod is gone
# before a new one starts (single-active coordinator; see
# dataCoord.enableActiveStandby in the ConfigMap).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-release-milvus-datacoord
  labels:
    helm.sh/chart: milvus-4.0.31
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.2.13"
    app.kubernetes.io/managed-by: Helm
    component: "datacoord"
  # Explicit empty map instead of a bare (null) `annotations:` key.
  annotations: {}
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app.kubernetes.io/name: milvus
      app.kubernetes.io/instance: my-release
      component: "datacoord"
  template:
    metadata:
      labels:
        app.kubernetes.io/name: milvus
        app.kubernetes.io/instance: my-release
        component: "datacoord"
      annotations:
        # NOTE(review): Helm-computed hash of the rendered config; regenerate
        # it if the my-release-milvus ConfigMap changes, or pods will not roll.
        checksum/config: 76399fc55730fc61154e9aadebb5ec5db9e8182a02c7edf9c5fd770b5b149b75
    spec:
      serviceAccountName: default
      initContainers:
      # Copies the run-helm.sh launcher and the `merge` tool into the shared
      # emptyDir — presumably so user.yaml is merged over default.yaml before
      # the milvus binary starts.
      - name: config
        command:
        - /cp
        - /run-helm.sh,/merge
        - /milvus/tools/run-helm.sh,/milvus/tools/merge
        image: "milvusdb/milvus-config-tool:v0.1.2"
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /milvus/tools
          name: tools
      containers:
      - name: datacoord
        image: "milvusdb/milvus:v2.4.5"
        imagePullPolicy: IfNotPresent
        args: [ "/milvus/tools/run-helm.sh", "milvus", "run", "datacoord" ]
        # bare `env:` (null) removed — no extra environment variables needed.
        ports:
          - name: datacoord
            containerPort: 13333
            protocol: TCP
          - name: metrics
            containerPort: 9091
            protocol: TCP
        livenessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 30
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        resources:
          {}
        volumeMounts:
        - name: milvus-config
          mountPath: /milvus/configs/default.yaml
          subPath: default.yaml
          readOnly: true
        - name: milvus-config
          mountPath: /milvus/configs/user.yaml
          subPath: user.yaml
          readOnly: true
        - mountPath: /milvus/tools
          name: tools
      volumes:
      - name: milvus-config
        configMap:
          name: my-release-milvus
      - name: tools
        emptyDir: {}
---
# Source: milvus/templates/datanode-deployment.yaml
# Milvus data node (horizontally scalable worker; default rolling update).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-release-milvus-datanode
  labels:
    helm.sh/chart: milvus-4.0.31
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.2.13"
    app.kubernetes.io/managed-by: Helm
    component: "datanode"
  # Explicit empty map instead of a bare (null) `annotations:` key.
  annotations: {}
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: milvus
      app.kubernetes.io/instance: my-release
      component: "datanode"
  template:
    metadata:
      labels:
        app.kubernetes.io/name: milvus
        app.kubernetes.io/instance: my-release
        component: "datanode"
      annotations:
        # NOTE(review): Helm-computed hash of the rendered config; regenerate
        # it if the my-release-milvus ConfigMap changes, or pods will not roll.
        checksum/config: 76399fc55730fc61154e9aadebb5ec5db9e8182a02c7edf9c5fd770b5b149b75
    spec:
      serviceAccountName: default
      initContainers:
      # Copies the run-helm.sh launcher and the `merge` tool into the shared
      # emptyDir for the main container.
      - name: config
        command:
        - /cp
        - /run-helm.sh,/merge
        - /milvus/tools/run-helm.sh,/milvus/tools/merge
        image: "milvusdb/milvus-config-tool:v0.1.2"
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /milvus/tools
          name: tools
      containers:
      - name: datanode
        image: "milvusdb/milvus:v2.4.5"
        imagePullPolicy: IfNotPresent
        args: [ "/milvus/tools/run-helm.sh", "milvus", "run", "datanode" ]
        # bare `env:` (null) removed — no extra environment variables needed.
        ports:
          - name: datanode
            containerPort: 21124
            protocol: TCP
          - name: metrics
            containerPort: 9091
            protocol: TCP
        livenessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 30
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        resources:
          {}
        volumeMounts:
        - name: milvus-config
          mountPath: /milvus/configs/default.yaml
          subPath: default.yaml
          readOnly: true
        - name: milvus-config
          mountPath: /milvus/configs/user.yaml
          subPath: user.yaml
          readOnly: true
        - mountPath: /milvus/tools
          name: tools
      volumes:
      - name: milvus-config
        configMap:
          name: my-release-milvus
      - name: tools
        emptyDir: {}
---
# Source: milvus/templates/indexcoord-deployment.yaml
# Milvus index coordinator. strategy Recreate ensures the old pod is gone
# before a new one starts (single-active coordinator; see
# indexCoord.enableActiveStandby in the ConfigMap).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-release-milvus-indexcoord
  labels:
    helm.sh/chart: milvus-4.0.31
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.2.13"
    app.kubernetes.io/managed-by: Helm
    component: "indexcoord"
  # Explicit empty map instead of a bare (null) `annotations:` key.
  annotations: {}
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app.kubernetes.io/name: milvus
      app.kubernetes.io/instance: my-release
      component: "indexcoord"
  template:
    metadata:
      labels:
        app.kubernetes.io/name: milvus
        app.kubernetes.io/instance: my-release
        component: "indexcoord"
      annotations:
        # NOTE(review): Helm-computed hash of the rendered config; regenerate
        # it if the my-release-milvus ConfigMap changes, or pods will not roll.
        checksum/config: 76399fc55730fc61154e9aadebb5ec5db9e8182a02c7edf9c5fd770b5b149b75
    spec:
      serviceAccountName: default
      initContainers:
      # Copies the run-helm.sh launcher and the `merge` tool into the shared
      # emptyDir for the main container.
      - name: config
        command:
        - /cp
        - /run-helm.sh,/merge
        - /milvus/tools/run-helm.sh,/milvus/tools/merge
        image: "milvusdb/milvus-config-tool:v0.1.2"
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /milvus/tools
          name: tools
      containers:
      - name: indexcoord
        image: "milvusdb/milvus:v2.4.5"
        imagePullPolicy: IfNotPresent
        args: [ "/milvus/tools/run-helm.sh", "milvus", "run", "indexcoord" ]
        # bare `env:` (null) removed — no extra environment variables needed.
        ports:
          - name: indexcoord
            containerPort: 31000
            protocol: TCP
          - name: metrics
            containerPort: 9091
            protocol: TCP
        livenessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 30
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        resources:
          {}
        volumeMounts:
        - name: milvus-config
          mountPath: /milvus/configs/default.yaml
          subPath: default.yaml
          readOnly: true
        - name: milvus-config
          mountPath: /milvus/configs/user.yaml
          subPath: user.yaml
          readOnly: true
        - mountPath: /milvus/tools
          name: tools
      volumes:
      - name: milvus-config
        configMap:
          name: my-release-milvus
      - name: tools
        emptyDir: {}
---
# Source: milvus/templates/indexnode-deployment.yaml
# Milvus index node. Mounts an extra emptyDir at /var/lib/milvus/data for
# disk-based index building (indexNode.enableDisk: true in the ConfigMap) —
# ephemeral: contents are lost on pod rescheduling.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-release-milvus-indexnode
  labels:
    helm.sh/chart: milvus-4.0.31
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.2.13"
    app.kubernetes.io/managed-by: Helm
    component: "indexnode"
  # Explicit empty map instead of a bare (null) `annotations:` key.
  annotations: {}
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: milvus
      app.kubernetes.io/instance: my-release
      component: "indexnode"
  template:
    metadata:
      labels:
        app.kubernetes.io/name: milvus
        app.kubernetes.io/instance: my-release
        component: "indexnode"
      annotations:
        # NOTE(review): Helm-computed hash of the rendered config; regenerate
        # it if the my-release-milvus ConfigMap changes, or pods will not roll.
        checksum/config: 76399fc55730fc61154e9aadebb5ec5db9e8182a02c7edf9c5fd770b5b149b75
    spec:
      serviceAccountName: default
      initContainers:
      # Copies the run-helm.sh launcher and the `merge` tool into the shared
      # emptyDir for the main container.
      - name: config
        command:
        - /cp
        - /run-helm.sh,/merge
        - /milvus/tools/run-helm.sh,/milvus/tools/merge
        image: "milvusdb/milvus-config-tool:v0.1.2"
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /milvus/tools
          name: tools
      containers:
      - name: indexnode
        image: "milvusdb/milvus:v2.4.5"
        imagePullPolicy: IfNotPresent
        args: [ "/milvus/tools/run-helm.sh", "milvus", "run", "indexnode" ]
        # bare `env:` (null) removed — no extra environment variables needed.
        ports:
          - name: indexnode
            containerPort: 21121
            protocol: TCP
          - name: metrics
            containerPort: 9091
            protocol: TCP
        livenessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 30
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        resources:
          {}
        volumeMounts:
        - name: milvus-config
          mountPath: /milvus/configs/default.yaml
          subPath: default.yaml
          readOnly: true
        - name: milvus-config
          mountPath: /milvus/configs/user.yaml
          subPath: user.yaml
          readOnly: true
        - mountPath: /milvus/tools
          name: tools
        - mountPath: /var/lib/milvus/data
          name: disk
      volumes:
      - name: milvus-config
        configMap:
          name: my-release-milvus
      - name: tools
        emptyDir: {}
      - name: disk
        emptyDir: {}
---
# Source: milvus/templates/proxy-deployment.yaml
# Milvus proxy: the client-facing gRPC entry point (port 19530).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-release-milvus-proxy
  labels:
    helm.sh/chart: milvus-4.0.31
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    # NOTE(review): chart app-version label says 2.2.13 but the image tag below
    # is v2.4.5 — label is informational only; confirm the chart/values pairing.
    app.kubernetes.io/version: "2.2.13"
    app.kubernetes.io/managed-by: Helm
    component: "proxy"
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: milvus
      app.kubernetes.io/instance: my-release
      component: "proxy"
  template:
    metadata:
      labels:
        app.kubernetes.io/name: milvus
        app.kubernetes.io/instance: my-release
        component: "proxy"
      annotations:
        # Triggers a rolling restart whenever the rendered ConfigMap changes.
        checksum/config: 76399fc55730fc61154e9aadebb5ec5db9e8182a02c7edf9c5fd770b5b149b75
    spec:
      serviceAccountName: default
      initContainers:
      # Copies the run-helm.sh / merge helpers into the shared "tools" volume.
      - name: config
        command:
        - /cp
        - /run-helm.sh,/merge
        - /milvus/tools/run-helm.sh,/milvus/tools/merge
        image: "milvusdb/milvus-config-tool:v0.1.2"
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /milvus/tools
          name: tools
      containers:
      - name: proxy
        image: "milvusdb/milvus:v2.4.5"
        imagePullPolicy: IfNotPresent
        args: ["/milvus/tools/run-helm.sh", "milvus", "run", "proxy"]
        ports:
          - name: milvus
            containerPort: 19530
            protocol: TCP
          - name: metrics
            containerPort: 9091
            protocol: TCP
        livenessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 30
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        resources: {}
        volumeMounts:
        - name: milvus-config
          mountPath: /milvus/configs/default.yaml
          subPath: default.yaml
          readOnly: true
        - name: milvus-config
          mountPath: /milvus/configs/user.yaml
          subPath: user.yaml
          readOnly: true
        - mountPath: /milvus/tools
          name: tools
      volumes:
      - name: milvus-config
        configMap:
          name: my-release-milvus
      - name: tools
        emptyDir: {}
---
# Source: milvus/templates/querycoord-deployment.yaml
# Milvus query coordinator. strategy: Recreate because only one active
# coordinator instance may run at a time.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-release-milvus-querycoord
  labels:
    helm.sh/chart: milvus-4.0.31
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    # NOTE(review): chart app-version label says 2.2.13 but the image tag below
    # is v2.4.5 — label is informational only; confirm the chart/values pairing.
    app.kubernetes.io/version: "2.2.13"
    app.kubernetes.io/managed-by: Helm
    component: "querycoord"
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app.kubernetes.io/name: milvus
      app.kubernetes.io/instance: my-release
      component: "querycoord"
  template:
    metadata:
      labels:
        app.kubernetes.io/name: milvus
        app.kubernetes.io/instance: my-release
        component: "querycoord"
      annotations:
        # Triggers a rolling restart whenever the rendered ConfigMap changes.
        checksum/config: 76399fc55730fc61154e9aadebb5ec5db9e8182a02c7edf9c5fd770b5b149b75
    spec:
      serviceAccountName: default
      initContainers:
      # Copies the run-helm.sh / merge helpers into the shared "tools" volume.
      - name: config
        command:
        - /cp
        - /run-helm.sh,/merge
        - /milvus/tools/run-helm.sh,/milvus/tools/merge
        image: "milvusdb/milvus-config-tool:v0.1.2"
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /milvus/tools
          name: tools
      containers:
      - name: querycoord
        image: "milvusdb/milvus:v2.4.5"
        imagePullPolicy: IfNotPresent
        args: ["/milvus/tools/run-helm.sh", "milvus", "run", "querycoord"]
        ports:
          - name: querycoord
            containerPort: 19531
            protocol: TCP
          - name: metrics
            containerPort: 9091
            protocol: TCP
        livenessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 30
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        resources: {}
        volumeMounts:
        - name: milvus-config
          mountPath: /milvus/configs/default.yaml
          subPath: default.yaml
          readOnly: true
        - name: milvus-config
          mountPath: /milvus/configs/user.yaml
          subPath: user.yaml
          readOnly: true
        - mountPath: /milvus/tools
          name: tools
      volumes:
      - name: milvus-config
        configMap:
          name: my-release-milvus
      - name: tools
        emptyDir: {}
---
# Source: milvus/templates/querynode-deployment.yaml
# Milvus query node: serves vector search; scratch space on emptyDir.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-release-milvus-querynode
  labels:
    helm.sh/chart: milvus-4.0.31
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    # NOTE(review): chart app-version label says 2.2.13 but the image tag below
    # is v2.4.5 — label is informational only; confirm the chart/values pairing.
    app.kubernetes.io/version: "2.2.13"
    app.kubernetes.io/managed-by: Helm
    component: "querynode"
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: milvus
      app.kubernetes.io/instance: my-release
      component: "querynode"
  template:
    metadata:
      labels:
        app.kubernetes.io/name: milvus
        app.kubernetes.io/instance: my-release
        component: "querynode"
      annotations:
        # Triggers a rolling restart whenever the rendered ConfigMap changes.
        checksum/config: 76399fc55730fc61154e9aadebb5ec5db9e8182a02c7edf9c5fd770b5b149b75
    spec:
      serviceAccountName: default
      initContainers:
      # Copies the run-helm.sh / merge helpers into the shared "tools" volume.
      - name: config
        command:
        - /cp
        - /run-helm.sh,/merge
        - /milvus/tools/run-helm.sh,/milvus/tools/merge
        image: "milvusdb/milvus-config-tool:v0.1.2"
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /milvus/tools
          name: tools
      containers:
      - name: querynode
        image: "milvusdb/milvus:v2.4.5"
        imagePullPolicy: IfNotPresent
        args: ["/milvus/tools/run-helm.sh", "milvus", "run", "querynode"]
        ports:
          - name: querynode
            containerPort: 21123
            protocol: TCP
          - name: metrics
            containerPort: 9091
            protocol: TCP
        livenessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 30
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        resources: {}
        volumeMounts:
        - name: milvus-config
          mountPath: /milvus/configs/default.yaml
          subPath: default.yaml
          readOnly: true
        - name: milvus-config
          mountPath: /milvus/configs/user.yaml
          subPath: user.yaml
          readOnly: true
        - mountPath: /milvus/tools
          name: tools
        - mountPath: /var/lib/milvus/data
          name: disk
      volumes:
      - name: milvus-config
        configMap:
          name: my-release-milvus
      - name: tools
        emptyDir: {}
      # Ephemeral scratch space for loaded segments.
      - name: disk
        emptyDir: {}
---
# Source: milvus/templates/rootcoord-deployment.yaml
# Milvus root coordinator. strategy: Recreate because only one active
# coordinator instance may run at a time.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-release-milvus-rootcoord
  labels:
    helm.sh/chart: milvus-4.0.31
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    # NOTE(review): chart app-version label says 2.2.13 but the image tag below
    # is v2.4.5 — label is informational only; confirm the chart/values pairing.
    app.kubernetes.io/version: "2.2.13"
    app.kubernetes.io/managed-by: Helm
    component: "rootcoord"
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app.kubernetes.io/name: milvus
      app.kubernetes.io/instance: my-release
      component: "rootcoord"
  template:
    metadata:
      labels:
        app.kubernetes.io/name: milvus
        app.kubernetes.io/instance: my-release
        component: "rootcoord"
      annotations:
        # Triggers a rolling restart whenever the rendered ConfigMap changes.
        checksum/config: 76399fc55730fc61154e9aadebb5ec5db9e8182a02c7edf9c5fd770b5b149b75
    spec:
      serviceAccountName: default
      initContainers:
      # Copies the run-helm.sh / merge helpers into the shared "tools" volume.
      - name: config
        command:
        - /cp
        - /run-helm.sh,/merge
        - /milvus/tools/run-helm.sh,/milvus/tools/merge
        image: "milvusdb/milvus-config-tool:v0.1.2"
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /milvus/tools
          name: tools
      containers:
      - name: rootcoord
        image: "milvusdb/milvus:v2.4.5"
        imagePullPolicy: IfNotPresent
        args: ["/milvus/tools/run-helm.sh", "milvus", "run", "rootcoord"]
        ports:
          - name: rootcoord
            containerPort: 53100
            protocol: TCP
          - name: metrics
            containerPort: 9091
            protocol: TCP
        livenessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 30
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        resources: {}
        volumeMounts:
        - name: milvus-config
          mountPath: /milvus/configs/default.yaml
          subPath: default.yaml
          readOnly: true
        - name: milvus-config
          mountPath: /milvus/configs/user.yaml
          subPath: user.yaml
          readOnly: true
        - mountPath: /milvus/tools
          name: tools
      volumes:
      - name: milvus-config
        configMap:
          name: my-release-milvus
      - name: tools
        emptyDir: {}
---
# Source: milvus/charts/etcd/templates/statefulset.yaml
# 3-member etcd cluster used as Milvus metadata store (Bitnami-style image).
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: my-release-etcd
  namespace: default
  labels:
    app.kubernetes.io/name: etcd
    helm.sh/chart: etcd-6.3.3
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/managed-by: Helm
spec:
  replicas: 3
  selector:
    matchLabels:
      app.kubernetes.io/name: etcd
      app.kubernetes.io/instance: my-release
  serviceName: my-release-etcd-headless
  podManagementPolicy: Parallel
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        app.kubernetes.io/name: etcd
        helm.sh/chart: etcd-6.3.3
        app.kubernetes.io/instance: my-release
        app.kubernetes.io/managed-by: Helm
    spec:
      affinity:
        # Prefer spreading the three members across distinct nodes.
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - podAffinityTerm:
                labelSelector:
                  matchLabels:
                    app.kubernetes.io/name: etcd
                    app.kubernetes.io/instance: my-release
                namespaces:
                  - "default"
                topologyKey: kubernetes.io/hostname
              weight: 1
      securityContext:
        fsGroup: 1001
      serviceAccountName: "default"
      containers:
        - name: etcd
          image: docker.io/milvusdb/etcd:3.5.5-r4
          imagePullPolicy: "IfNotPresent"
          securityContext:
            runAsNonRoot: true
            runAsUser: 1001
          env:
            - name: BITNAMI_DEBUG
              value: "false"
            - name: MY_POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: MY_POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: ETCDCTL_API
              value: "3"
            - name: ETCD_ON_K8S
              value: "yes"
            - name: ETCD_START_FROM_SNAPSHOT
              value: "no"
            - name: ETCD_DISASTER_RECOVERY
              value: "no"
            - name: ETCD_NAME
              value: "$(MY_POD_NAME)"
            - name: ETCD_DATA_DIR
              value: "/bitnami/etcd/data"
            - name: ETCD_LOG_LEVEL
              value: "info"
            # NOTE(review): auth is disabled — acceptable inside the cluster,
            # but do not expose the client port externally.
            - name: ALLOW_NONE_AUTHENTICATION
              value: "yes"
            - name: ETCD_ADVERTISE_CLIENT_URLS
              value: "http://$(MY_POD_NAME).my-release-etcd-headless.default.svc.cluster.local:2379"
            - name: ETCD_LISTEN_CLIENT_URLS
              value: "http://0.0.0.0:2379"
            - name: ETCD_INITIAL_ADVERTISE_PEER_URLS
              value: "http://$(MY_POD_NAME).my-release-etcd-headless.default.svc.cluster.local:2380"
            - name: ETCD_LISTEN_PEER_URLS
              value: "http://0.0.0.0:2380"
            - name: ETCD_AUTO_COMPACTION_MODE
              value: "revision"
            - name: ETCD_AUTO_COMPACTION_RETENTION
              value: "1000"
            - name: ETCD_INITIAL_CLUSTER_TOKEN
              value: "etcd-cluster-k8s"
            - name: ETCD_INITIAL_CLUSTER_STATE
              value: "new"
            - name: ETCD_INITIAL_CLUSTER
              value: "my-release-etcd-0=http://my-release-etcd-0.my-release-etcd-headless.default.svc.cluster.local:2380,my-release-etcd-1=http://my-release-etcd-1.my-release-etcd-headless.default.svc.cluster.local:2380,my-release-etcd-2=http://my-release-etcd-2.my-release-etcd-headless.default.svc.cluster.local:2380"
            - name: ETCD_CLUSTER_DOMAIN
              value: "my-release-etcd-headless.default.svc.cluster.local"
            - name: ETCD_QUOTA_BACKEND_BYTES
              value: "4294967296"
            - name: ETCD_HEARTBEAT_INTERVAL
              value: "500"
            - name: ETCD_ELECTION_TIMEOUT
              value: "2500"
          ports:
            - name: client
              containerPort: 2379
              protocol: TCP
            - name: peer
              containerPort: 2380
              protocol: TCP
          livenessProbe:
            exec:
              command:
                - /opt/bitnami/scripts/etcd/healthcheck.sh
            initialDelaySeconds: 60
            periodSeconds: 30
            timeoutSeconds: 10
            successThreshold: 1
            failureThreshold: 5
          readinessProbe:
            exec:
              command:
                - /opt/bitnami/scripts/etcd/healthcheck.sh
            initialDelaySeconds: 60
            periodSeconds: 20
            timeoutSeconds: 10
            successThreshold: 1
            failureThreshold: 5
          lifecycle:
            # Removes the member from the cluster gracefully on pod shutdown.
            preStop:
              exec:
                command:
                  - /opt/bitnami/scripts/etcd/prestop.sh
          resources:
            limits: {}
            requests: {}
          volumeMounts:
            - name: data
              mountPath: /bitnami/etcd
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        accessModes:
          - "ReadWriteOnce"
        resources:
          requests:
            storage: "10Gi"
        storageClassName: nfs
---
# Source: milvus/charts/pulsar/templates/autorecovery-statefulset.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
# BookKeeper autorecovery daemon: re-replicates under-replicated ledgers.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: "my-release-pulsar-recovery"
  namespace: default
  labels:
    app: pulsar
    chart: pulsar-2.7.8
    release: my-release
    heritage: Helm
    cluster: my-release-pulsar
    component: recovery
spec:
  serviceName: "my-release-pulsar-recovery"
  replicas: 1
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: Parallel
  selector:
    matchLabels:
      app: pulsar
      release: my-release
      component: recovery
  template:
    metadata:
      labels:
        app: pulsar
        release: my-release
        cluster: my-release-pulsar
        component: recovery
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "8000"
    spec:
      terminationGracePeriodSeconds: 30
      initContainers:
      # Waits until the BookKeeper metadata (instance id) exists in zookeeper,
      # i.e. initnewcluster has completed.
      - name: pulsar-bookkeeper-verify-clusterid
        image: "apachepulsar/pulsar:2.8.2"
        imagePullPolicy: IfNotPresent
        command: ["sh", "-c"]
        args:
        - >
          bin/apply-config-from-env.py conf/bookkeeper.conf;until bin/bookkeeper shell whatisinstanceid; do
            sleep 3;
          done;
        envFrom:
        - configMapRef:
            name: "my-release-pulsar-recovery"
      containers:
      - name: "my-release-pulsar-recovery"
        image: "apachepulsar/pulsar:2.8.2"
        imagePullPolicy: IfNotPresent
        resources:
          requests:
            cpu: 1
            memory: 512Mi
        command: ["sh", "-c"]
        args:
        - >
          bin/apply-config-from-env.py conf/bookkeeper.conf;
          
          OPTS="${OPTS} -Dlog4j2.formatMsgNoLookups=true" exec bin/bookkeeper autorecovery
        ports:
        - name: http
          containerPort: 8000
        envFrom:
        - configMapRef:
            name: "my-release-pulsar-recovery"
---
# Source: milvus/charts/pulsar/templates/bookkeeper-statefulset.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
# BookKeeper bookies: durable message storage for Pulsar. Journal and ledger
# directories are backed by separate PVCs (storageClass "nfs").
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: "my-release-pulsar-bookie"
  namespace: default
  labels:
    app: pulsar
    chart: pulsar-2.7.8
    release: my-release
    heritage: Helm
    cluster: my-release-pulsar
    component: bookie
spec:
  serviceName: "my-release-pulsar-bookie"
  replicas: 3
  selector:
    matchLabels:
      app: pulsar
      release: my-release
      component: bookie
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: Parallel
  template:
    metadata:
      labels:
        app: pulsar
        release: my-release
        cluster: my-release-pulsar
        component: bookie
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "8000"
    spec:
      terminationGracePeriodSeconds: 30
      initContainers:
      # Waits for BookKeeper initnewcluster to complete before starting bookies.
      - name: pulsar-bookkeeper-verify-clusterid
        image: "apachepulsar/pulsar:2.8.2"
        imagePullPolicy: IfNotPresent
        command: ["sh", "-c"]
        args:
        # only reformat bookie if bookkeeper is running without persistence
        - >
          
          set -e;
          bin/apply-config-from-env.py conf/bookkeeper.conf;until bin/bookkeeper shell whatisinstanceid; do
            sleep 3;
          done;
        envFrom:
        - configMapRef:
            name: "my-release-pulsar-bookie"
      containers:
      - name: "my-release-pulsar-bookie"
        image: "apachepulsar/pulsar:2.8.2"
        imagePullPolicy: IfNotPresent
        livenessProbe:
          httpGet:
            path: /api/v1/bookie/state
            port: 8000
          initialDelaySeconds: 10
          periodSeconds: 30
          timeoutSeconds: 5
          failureThreshold: 60
        readinessProbe:
          httpGet:
            path: /api/v1/bookie/is_ready
            port: 8000
          initialDelaySeconds: 10
          periodSeconds: 30
          timeoutSeconds: 5
          failureThreshold: 60
        resources:
          requests:
            cpu: 1
            memory: 2048Mi
        command: ["sh", "-c"]
        args:
        - >
          bin/apply-config-from-env.py conf/bookkeeper.conf;
          
          OPTS="${OPTS} -Dlog4j2.formatMsgNoLookups=true" exec bin/pulsar bookie;
        ports:
        - name: "bookie"
          containerPort: 3181
        - name: http
          containerPort: 8000
        envFrom:
        - configMapRef:
            name: "my-release-pulsar-bookie"
        volumeMounts:
        - name: "my-release-pulsar-bookie-journal"
          mountPath: /pulsar/data/bookkeeper/journal
        - name: "my-release-pulsar-bookie-ledgers"
          mountPath: /pulsar/data/bookkeeper/ledgers
  volumeClaimTemplates:
  - metadata:
      name: "my-release-pulsar-bookie-journal"
    spec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 100Gi
      storageClassName: "nfs"
  - metadata:
      name: "my-release-pulsar-bookie-ledgers"
    spec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 200Gi
      storageClassName: "nfs"
---
# Source: milvus/charts/pulsar/templates/broker-statefulset.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Pulsar broker: message routing layer. Starts only after zookeeper and at
# least two bookies are reachable (see init containers below).
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: "my-release-pulsar-broker"
  namespace: default
  labels:
    app: pulsar
    chart: pulsar-2.7.8
    release: my-release
    heritage: Helm
    cluster: my-release-pulsar
    component: broker
spec:
  serviceName: "my-release-pulsar-broker"
  replicas: 1
  selector:
    matchLabels:
      app: pulsar
      release: my-release
      component: broker
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: Parallel
  template:
    metadata:
      labels:
        app: pulsar
        release: my-release
        cluster: my-release-pulsar
        component: broker
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "8080"
    spec:
      serviceAccountName: "my-release-pulsar-broker-acct"
      terminationGracePeriodSeconds: 30
      initContainers:
      # Waits until the pulsar cluster metadata exists in zookeeper.
      - name: wait-zookeeper-ready
        image: "apachepulsar/pulsar:2.8.2"
        imagePullPolicy: IfNotPresent
        command: ["sh", "-c"]
        args:
          - >-
            
            until bin/bookkeeper org.apache.zookeeper.ZooKeeperMain -server my-release-pulsar-zookeeper:2181 get /admin/clusters/my-release-pulsar; do
              echo "pulsar cluster my-release-pulsar isn't initialized yet ... check in 3 seconds ..." && sleep 3;
            done;
      # Waits until BookKeeper is initialized and at least 2 bookies resolve
      # in DNS before starting the broker.
      - name: wait-bookkeeper-ready
        image: "apachepulsar/pulsar:2.8.2"
        imagePullPolicy: IfNotPresent
        command: ["sh", "-c"]
        args:
          - >
            
            bin/apply-config-from-env.py conf/bookkeeper.conf;
            until bin/bookkeeper shell whatisinstanceid; do
              echo "bookkeeper cluster is not initialized yet. backoff for 3 seconds ...";
              sleep 3;
            done;
            echo "bookkeeper cluster is already initialized";
            bookieServiceNumber="$(nslookup -timeout=10 my-release-pulsar-bookie | grep Name | wc -l)";
            until [ ${bookieServiceNumber} -ge 2 ]; do
              echo "bookkeeper cluster my-release-pulsar isn't ready yet ... check in 10 seconds ...";
              sleep 10;
              bookieServiceNumber="$(nslookup -timeout=10 my-release-pulsar-bookie | grep Name | wc -l)";
            done;
            echo "bookkeeper cluster is ready";
        envFrom:
          - configMapRef:
              name: "my-release-pulsar-bookie"
      containers:
      - name: "my-release-pulsar-broker"
        image: "apachepulsar/pulsar:2.8.2"
        imagePullPolicy: IfNotPresent
        livenessProbe:
          httpGet:
            path: /status.html
            port: 8080
          initialDelaySeconds: 30
          periodSeconds: 10
          timeoutSeconds: 5
          failureThreshold: 10
        readinessProbe:
          httpGet:
            path: /status.html
            port: 8080
          initialDelaySeconds: 30
          periodSeconds: 10
          timeoutSeconds: 5
          failureThreshold: 10
        resources:
          requests:
            cpu: 1.5
            memory: 4096Mi
        command: ["sh", "-c"]
        args:
        - >
          bin/apply-config-from-env.py conf/broker.conf;
          bin/gen-yml-from-env.py conf/functions_worker.yml;
          echo "OK" > status;
          
          bin/pulsar zookeeper-shell -server my-release-pulsar-zookeeper:2181 get /loadbalance/brokers/${HOSTNAME}.my-release-pulsar-broker.default.svc.cluster.local:8080;
          while [ $? -eq 0 ]; do
            echo "broker ${HOSTNAME}.my-release-pulsar-broker.default.svc.cluster.local znode still exists ... check in 10 seconds ...";
            sleep 10;
            bin/pulsar zookeeper-shell -server my-release-pulsar-zookeeper:2181 get /loadbalance/brokers/${HOSTNAME}.my-release-pulsar-broker.default.svc.cluster.local:8080;
          done;
          cat conf/pulsar_env.sh;
          OPTS="${OPTS} -Dlog4j2.formatMsgNoLookups=true" exec bin/pulsar broker;
        ports:
        # prometheus needs to access /metrics endpoint
        - name: http
          containerPort: 8080
        - name: "pulsar"
          containerPort: 6650
        envFrom:
        - configMapRef:
            name: "my-release-pulsar-broker"
---
# Source: milvus/charts/pulsar/templates/proxy-statefulset.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Pulsar proxy: fronts the brokers; waits for zookeeper metadata and at least
# one resolvable broker before starting.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: "my-release-pulsar-proxy"
  namespace: default
  labels:
    app: pulsar
    chart: pulsar-2.7.8
    release: my-release
    heritage: Helm
    cluster: my-release-pulsar
    component: proxy
spec:
  serviceName: "my-release-pulsar-proxy"
  replicas: 1
  selector:
    matchLabels:
      app: pulsar
      release: my-release
      component: proxy
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: Parallel
  template:
    metadata:
      labels:
        app: pulsar
        release: my-release
        cluster: my-release-pulsar
        component: proxy
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "80"
    spec:
      terminationGracePeriodSeconds: 30
      initContainers:
      # Waits until the pulsar cluster metadata exists in zookeeper.
      - name: wait-zookeeper-ready
        image: "apachepulsar/pulsar:2.8.2"
        imagePullPolicy: IfNotPresent
        command: ["sh", "-c"]
        args:
          - >-
            until bin/pulsar zookeeper-shell -server my-release-pulsar-zookeeper get /admin/clusters/my-release-pulsar; do
              sleep 3;
            done;
      # Waits until at least one broker resolves in DNS.
      - name: wait-broker-ready
        image: "apachepulsar/pulsar:2.8.2"
        imagePullPolicy: IfNotPresent
        command: ["sh", "-c"]
        args:
          - >-
            set -e;
            brokerServiceNumber="$(nslookup -timeout=10 my-release-pulsar-broker | grep Name | wc -l)";
            until [ ${brokerServiceNumber} -ge 1 ]; do
              echo "pulsar cluster my-release-pulsar isn't initialized yet ... check in 10 seconds ...";
              sleep 10;
              brokerServiceNumber="$(nslookup -timeout=10 my-release-pulsar-broker | grep Name | wc -l)";
            done;
      containers:
      - name: "my-release-pulsar-proxy"
        image: "apachepulsar/pulsar:2.8.2"
        imagePullPolicy: IfNotPresent
        livenessProbe:
          httpGet:
            path: /status.html
            port: 80
          initialDelaySeconds: 30
          periodSeconds: 10
          timeoutSeconds: 5
          failureThreshold: 10
        readinessProbe:
          httpGet:
            path: /status.html
            port: 80
          initialDelaySeconds: 30
          periodSeconds: 10
          timeoutSeconds: 5
          failureThreshold: 10
        resources:
          requests:
            cpu: 1
            memory: 2048Mi
        command: ["sh", "-c"]
        args:
        - >
          bin/apply-config-from-env.py conf/proxy.conf &&
          echo "OK" > status &&
          OPTS="${OPTS} -Dlog4j2.formatMsgNoLookups=true" exec bin/pulsar proxy
        ports:
        # prometheus needs to access /metrics endpoint
        - name: http
          containerPort: 80
        - name: "pulsar"
          containerPort: 6650
        envFrom:
        - configMapRef:
            name: "my-release-pulsar-proxy"
---
# Source: milvus/charts/pulsar/templates/zookeeper-statefulset.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#

# deploy zookeeper only when `components.zookeeper` is true
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: "my-release-pulsar-zookeeper"
  namespace: default
  labels:
    app: pulsar
    chart: pulsar-2.7.8
    release: my-release
    heritage: Helm
    cluster: my-release-pulsar
    component: zookeeper
spec:
  serviceName: "my-release-pulsar-zookeeper"
  replicas: 3
  selector:
    matchLabels:
      app: pulsar
      release: my-release
      component: zookeeper
  updateStrategy:
    type: RollingUpdate
  # OrderedReady: pods are created one at a time, each waiting for the
  # previous one to become Ready.
  podManagementPolicy: OrderedReady
  template:
    metadata:
      labels:
        app: pulsar
        release: my-release
        cluster: my-release-pulsar
        component: zookeeper
      annotations:
        prometheus.io/port: "8000"
        prometheus.io/scrape: "true"
    spec:
      # Explicit empty mapping instead of a bare (null) `affinity:` key;
      # identical meaning, passes yamllint's empty-values rule.
      affinity: {}
      terminationGracePeriodSeconds: 30
      containers:
      - name: "my-release-pulsar-zookeeper"
        image: "apachepulsar/pulsar:2.8.2"
        imagePullPolicy: IfNotPresent
        resources:
          requests:
            cpu: 0.3
            memory: 1024Mi
        command: ["sh", "-c"]
        args:
        - >
          bin/apply-config-from-env.py conf/zookeeper.conf;

          bin/generate-zookeeper-config.sh conf/zookeeper.conf;
          OPTS="${OPTS} -Dlog4j2.formatMsgNoLookups=true" exec bin/pulsar zookeeper;
        ports:
        # prometheus needs to access /metrics endpoint
        - name: http
          containerPort: 8000
        - name: client
          containerPort: 2181
        - name: follower
          containerPort: 2888
        - name: leader-election
          containerPort: 3888
        env:
        # Comma-separated ensemble member list; kept on one line so the value
        # is unambiguous (the original relied on plain-scalar line folding).
        - name: ZOOKEEPER_SERVERS
          value: my-release-pulsar-zookeeper-0,my-release-pulsar-zookeeper-1,my-release-pulsar-zookeeper-2
        envFrom:
        - configMapRef:
            name: "my-release-pulsar-zookeeper"
        readinessProbe:
          exec:
            command:
            - bin/pulsar-zookeeper-ruok.sh
          initialDelaySeconds: 10
          periodSeconds: 30
          timeoutSeconds: 5
          failureThreshold: 10
        livenessProbe:
          exec:
            command:
            - bin/pulsar-zookeeper-ruok.sh
          initialDelaySeconds: 10
          periodSeconds: 30
          timeoutSeconds: 5
          failureThreshold: 10
        volumeMounts:
        - name: "my-release-pulsar-zookeeper-data"
          mountPath: /pulsar/data
      # NOTE: the dangling null `volumes:` key from the rendered chart was
      # removed; pod storage comes solely from the volumeClaimTemplates below.
  volumeClaimTemplates:
  - metadata:
      name: "my-release-pulsar-zookeeper-data"
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 20Gi
      storageClassName: "nfs"
---
# Source: milvus/charts/pulsar/templates/bookkeeper-cluster-initialize.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
apiVersion: batch/v1
kind: Job
metadata:
  name: "my-release-pulsar-bookie-init"
  namespace: default
  labels:
    app: pulsar
    chart: pulsar-2.7.8
    release: my-release
    heritage: Helm
    cluster: my-release-pulsar
    component: "bookie-init"
spec:
  template:
    spec:
      initContainers:
      # Block until the last zookeeper pod's headless-service DNS record
      # resolves, i.e. the ensemble is up.
      - name: wait-zookeeper-ready
        image: "apachepulsar/pulsar:2.8.2"
        imagePullPolicy: IfNotPresent
        command: ["sh", "-c"]
        args:
          - >-
            until nslookup my-release-pulsar-zookeeper-2.my-release-pulsar-zookeeper.default; do
              sleep 3;
            done;
      containers:
      # One-shot bookkeeper metadata initialization; idempotent thanks to the
      # `whatisinstanceid` guard.
      - name: "my-release-pulsar-bookie-init"
        image: "apachepulsar/pulsar:2.8.2"
        imagePullPolicy: IfNotPresent
        command: ["sh", "-c"]
        args:
          - >
            bin/apply-config-from-env.py conf/bookkeeper.conf;

            if bin/bookkeeper shell whatisinstanceid; then
                echo "bookkeeper cluster already initialized";
            else
                bin/bookkeeper shell initnewcluster;
            fi
        envFrom:
        - configMapRef:
            name: "my-release-pulsar-bookie"
      # Empty null-valued `volumeMounts:`/`volumes:` keys from the rendered
      # chart were dropped: a bare key is null and fails yamllint empty-values.
      restartPolicy: Never
---
# Source: milvus/charts/pulsar/templates/pulsar-cluster-initialize.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
apiVersion: batch/v1
kind: Job
metadata:
  name: "my-release-pulsar-pulsar-init"
  namespace: default
  labels:
    app: pulsar
    chart: pulsar-2.7.8
    release: my-release
    heritage: Helm
    cluster: my-release-pulsar
    component: pulsar-init
spec:
  template:
    spec:
      initContainers:
      # Block until the last zookeeper pod's headless-service DNS record
      # resolves, i.e. the ensemble is up.
      - name: wait-zookeeper-ready
        image: "apachepulsar/pulsar:2.8.2"
        imagePullPolicy: IfNotPresent
        command: ["sh", "-c"]
        args:
          - >-
            until nslookup my-release-pulsar-zookeeper-2.my-release-pulsar-zookeeper.default; do
              sleep 3;
            done;
      # This initContainer will wait for bookkeeper initnewcluster to complete
      # before initializing pulsar metadata
      - name: pulsar-bookkeeper-verify-clusterid
        image: "apachepulsar/pulsar:2.8.2"
        imagePullPolicy: IfNotPresent
        command: ["sh", "-c"]
        args:
        - >
          bin/apply-config-from-env.py conf/bookkeeper.conf;

          until bin/bookkeeper shell whatisinstanceid; do
            sleep 3;
          done;
        envFrom:
        - configMapRef:
            name: "my-release-pulsar-bookie"
      containers:
      # One-shot cluster-metadata bootstrap pointing all service URLs at the
      # broker service in the default namespace.
      - name: "my-release-pulsar-pulsar-init"
        image: "apachepulsar/pulsar:2.8.2"
        imagePullPolicy: IfNotPresent
        command: ["sh", "-c"]
        args:
          - |

            bin/pulsar initialize-cluster-metadata \
              --cluster my-release-pulsar \
              --zookeeper my-release-pulsar-zookeeper:2181 \
              --configuration-store my-release-pulsar-zookeeper:2181 \
              --web-service-url http://my-release-pulsar-broker.default.svc.cluster.local:8080/ \
              --web-service-url-tls https://my-release-pulsar-broker.default.svc.cluster.local:8443/ \
              --broker-service-url pulsar://my-release-pulsar-broker.default.svc.cluster.local:6650/ \
              --broker-service-url-tls pulsar+ssl://my-release-pulsar-broker.default.svc.cluster.local:6651/ ;
      # Empty null-valued `volumeMounts:`/`volumes:` keys from the rendered
      # chart were dropped: a bare key is null and fails yamllint empty-values.
      restartPolicy: OnFailure
---
# Source: milvus/charts/pulsar/templates/autorecovery-podmonitor.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#

# deploy broker PodMonitor only when `$.Values.broker.podMonitor.enabled` is true
---
# Source: milvus/charts/pulsar/templates/autorecovery-rbac.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
---
# Source: milvus/charts/pulsar/templates/bookkeeper-pdb.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
---
# Source: milvus/charts/pulsar/templates/bookkeeper-podmonitor.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#

# deploy bookkeeper PodMonitor only when `$.Values.bookkeeper.podMonitor.enabled` is true
---
# Source: milvus/charts/pulsar/templates/bookkeeper-rbac.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
---
# Source: milvus/charts/pulsar/templates/bookkeeper-storageclass.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
---
# Source: milvus/charts/pulsar/templates/broker-pdb.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
---
# Source: milvus/charts/pulsar/templates/broker-podmonitor.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#

# deploy broker PodMonitor only when `$.Values.broker.podMonitor.enabled` is true
---
# Source: milvus/charts/pulsar/templates/broker-rbac.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
---
# Source: milvus/charts/pulsar/templates/dashboard-deployment.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
---
# Source: milvus/charts/pulsar/templates/dashboard-ingress.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
---
# Source: milvus/charts/pulsar/templates/dashboard-service.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
---
# Source: milvus/charts/pulsar/templates/function-worker-configmap.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
---
# Source: milvus/charts/pulsar/templates/grafana-admin-secret.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
---
# Source: milvus/charts/pulsar/templates/grafana-configmap.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
---
# Source: milvus/charts/pulsar/templates/grafana-deployment.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
---
# Source: milvus/charts/pulsar/templates/grafana-ingress.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
---
# Source: milvus/charts/pulsar/templates/grafana-service.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
---
# Source: milvus/charts/pulsar/templates/keytool.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#

# script to process key/cert to keystore and truststore
---
# Source: milvus/charts/pulsar/templates/namespace.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
---
# Source: milvus/charts/pulsar/templates/prometheus-configmap.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
---
# Source: milvus/charts/pulsar/templates/prometheus-deployment.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
---
# Source: milvus/charts/pulsar/templates/prometheus-pvc.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
---
# Source: milvus/charts/pulsar/templates/prometheus-rbac.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
---
# Source: milvus/charts/pulsar/templates/prometheus-service.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
---
# Source: milvus/charts/pulsar/templates/prometheus-storageclass.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
---
# Source: milvus/charts/pulsar/templates/proxy-ingress.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
---
# Source: milvus/charts/pulsar/templates/proxy-pdb.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
---
# Source: milvus/charts/pulsar/templates/proxy-podmonitor.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#

# deploy proxy PodMonitor only when `$.Values.proxy.podMonitor.enabled` is true
---
# Source: milvus/charts/pulsar/templates/proxy-rbac.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
---
# Source: milvus/charts/pulsar/templates/pulsar-manager-ingress.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
---
# Source: milvus/charts/pulsar/templates/tls-cert-internal-issuer.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
---
# Source: milvus/charts/pulsar/templates/tls-certs-internal.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
---
# Source: milvus/charts/pulsar/templates/toolset-configmap.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
---
# Source: milvus/charts/pulsar/templates/toolset-rbac.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
---
# Source: milvus/charts/pulsar/templates/toolset-service.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
---
# Source: milvus/charts/pulsar/templates/toolset-statefulset.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
---
# Source: milvus/charts/pulsar/templates/zookeeper-pdb.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#

# deploy zookeeper only when `components.zookeeper` is true
---
# Source: milvus/charts/pulsar/templates/zookeeper-podmonitor.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#

# deploy zookeeper PodMonitor only when `$.Values.zookeeper.podMonitor.enabled` is true
---
# Source: milvus/charts/pulsar/templates/zookeeper-rbac.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
---
# Source: milvus/charts/pulsar/templates/zookeeper-storageclass.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#

# deploy zookeeper only when `components.zookeeper` is true

# define the storage class for data directory

# NOTE(review): the three lines below are residue from the web page this
# manifest was copied from (site logo / footer text, not chart content).
# Commented out so the final YAML document remains parseable; remove entirely
# before using this file with `kubectl apply` or `helm`.
# Logo
#
# K8S/Kubernetes社区为您提供最前沿的新闻资讯和知识内容
#
# 更多推荐