Building a JobManager HA cluster with the official Flink Docker image kept failing for me. I finally got it working by studying the start-cluster.sh script from Flink 1.11.2 and adjusting the image's Docker startup script accordingly. I hope this writeup helps others avoid the same trouble.
A few things to note:

  • My Kubernetes environment runs k8s 1.14.2 with Rancher 2.2.8.
  • 192.168.32.14 is the address of my local image registry; replace it with your own.

1 Modify the image

1.1 Get the Dockerfile source from the official repository

GitHub repository:
https://github.com/apache/flink-docker

Modify docker-entrypoint.sh under the 1.11/scala_2.11-java8-debian directory:

#!/bin/sh

###############################################################################
#  Licensed to the Apache Software Foundation (ASF) under one
#  or more contributor license agreements.  See the NOTICE file
#  distributed with this work for additional information
#  regarding copyright ownership.  The ASF licenses this file
#  to you under the Apache License, Version 2.0 (the
#  "License"); you may not use this file except in compliance
#  with the License.  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################

COMMAND_STANDALONE="standalone-job"

# If unspecified, the hostname of the container is taken as the JobManager address
JOB_MANAGER_RPC_ADDRESS=${JOB_MANAGER_RPC_ADDRESS:-$(hostname -f)}
CONF_FILE="${FLINK_HOME}/conf/flink-conf.yaml"

drop_privs_cmd() {
    if [ $(id -u) != 0 ]; then
        # Don't need to drop privs if EUID != 0
        return
    elif [ -x /sbin/su-exec ]; then
        # Alpine
        echo su-exec flink
    else
        # Others
        echo gosu flink
    fi
}

copy_plugins_if_required() {
  if [ -z "$ENABLE_BUILT_IN_PLUGINS" ]; then
    return 0
  fi

  echo "Enabling required built-in plugins"
  for target_plugin in $(echo "$ENABLE_BUILT_IN_PLUGINS" | tr ';' ' '); do
    echo "Linking ${target_plugin} to plugin directory"
    plugin_name=${target_plugin%.jar}

    mkdir -p "${FLINK_HOME}/plugins/${plugin_name}"
    if [ ! -e "${FLINK_HOME}/opt/${target_plugin}" ]; then
      echo "Plugin ${target_plugin} does not exist. Exiting."
      exit 1
    else
      ln -fs "${FLINK_HOME}/opt/${target_plugin}" "${FLINK_HOME}/plugins/${plugin_name}"
      echo "Successfully enabled ${target_plugin}"
    fi
  done
}

set_config_option() {
  local option=$1
  local value=$2

  # escape periods for usage in regular expressions
  local escaped_option=$(echo ${option} | sed -e "s/\./\\\./g")

  # either override an existing entry, or append a new one
  if grep -E "^${escaped_option}:.*" "${CONF_FILE}" > /dev/null; then
        sed -i -e "s/${escaped_option}:.*/$option: $value/g" "${CONF_FILE}"
  else
        echo "${option}: ${value}" >> "${CONF_FILE}"
  fi
}

set_common_options() {
    set_config_option jobmanager.rpc.address ${JOB_MANAGER_RPC_ADDRESS}
    set_config_option blob.server.port 6124
    set_config_option query.server.port 6125
}

prepare_job_manager_start() {
    echo "Starting Job Manager"
    copy_plugins_if_required

    set_common_options

    if [ -n "${FLINK_PROPERTIES}" ]; then
        echo "${FLINK_PROPERTIES}" >> "${CONF_FILE}"
    fi
    envsubst < "${CONF_FILE}" > "${CONF_FILE}.tmp" && mv "${CONF_FILE}.tmp" "${CONF_FILE}"
}

if [ "$1" = "help" ]; then
    echo "Usage: $(basename "$0") (jobmanager|${COMMAND_STANDALONE}|taskmanager|help)"
    exit 0
elif [ "$1" = "jobmanager" ]; then
    shift 1
    #prepare_job_manager_start

    exec "$FLINK_HOME/bin/jobmanager.sh" start-foreground $(hostname -f) 8081
elif [ "$1" = ${COMMAND_STANDALONE} ]; then
    shift 1
    prepare_job_manager_start

    exec $(drop_privs_cmd) "$FLINK_HOME/bin/standalone-job.sh" start-foreground "$@"
elif [ "$1" = "taskmanager" ]; then
    shift 1
    echo "Starting Task Manager"
    copy_plugins_if_required

    TASK_MANAGER_NUMBER_OF_TASK_SLOTS=${TASK_MANAGER_NUMBER_OF_TASK_SLOTS:-$(grep -c ^processor /proc/cpuinfo)}

    set_common_options
    set_config_option taskmanager.numberOfTaskSlots ${TASK_MANAGER_NUMBER_OF_TASK_SLOTS}

    if [ -n "${FLINK_PROPERTIES}" ]; then
        echo "${FLINK_PROPERTIES}" >> "${CONF_FILE}"
    fi
    envsubst < "${CONF_FILE}" > "${CONF_FILE}.tmp" && mv "${CONF_FILE}.tmp" "${CONF_FILE}"

    exec $(drop_privs_cmd) "$FLINK_HOME/bin/taskmanager.sh" start-foreground "$@"
fi

exec "$@"

Compared with the official script, the jobmanager branch above comments out the prepare_job_manager_start call and rewrites the exec line; the original content was:

    prepare_job_manager_start

    exec $(drop_privs_cmd) "$FLINK_HOME/bin/jobmanager.sh" start-foreground "$@"
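
For context, this mirrors how Flink 1.11's own start-cluster.sh launches JobManagers in HA mode: it reads the masters file and starts each JobManager with an explicit hostname and web UI port. A paraphrased sketch of that logic (not the verbatim upstream script):

# Paraphrased from flink-1.11.2/bin/start-cluster.sh, HA branch (a sketch,
# not verbatim): each master listed in conf/masters is started with an
# explicit host and web UI port -- the same pair the modified entrypoint passes.
readMasters   # upstream helper; fills MASTERS and WEBUIPORTS from conf/masters

for ((i = 0; i < ${#MASTERS[@]}; ++i)); do
    master=${MASTERS[i]}
    webuiport=${WEBUIPORTS[i]}
    "${FLINK_BIN_DIR}"/jobmanager.sh start "${master}" "${webuiport}"
done

Inside a pod there is exactly one JobManager, so the entrypoint passes $(hostname -f) and 8081 directly, and start-foreground keeps the process attached so Kubernetes can supervise it.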

Dockerfile contents:

###############################################################################
#  Licensed to the Apache Software Foundation (ASF) under one
#  or more contributor license agreements.  See the NOTICE file
#  distributed with this work for additional information
#  regarding copyright ownership.  The ASF licenses this file
#  to you under the Apache License, Version 2.0 (the
#  "License"); you may not use this file except in compliance
#  with the License.  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################

FROM openjdk:8-jre

# Install dependencies
RUN set -ex; \
  apt-get update; \
  apt-get -y install libsnappy1v5 gettext-base; \
  rm -rf /var/lib/apt/lists/*

# Grab gosu for easy step-down from root
ENV GOSU_VERSION 1.11
RUN set -ex; \
  wget -nv -O /usr/local/bin/gosu "https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$(dpkg --print-architecture)"; \
  wget -nv -O /usr/local/bin/gosu.asc "https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$(dpkg --print-architecture).asc"; \
  export GNUPGHOME="$(mktemp -d)"; \
  for server in ha.pool.sks-keyservers.net $(shuf -e \
                          hkp://p80.pool.sks-keyservers.net:80 \
                          keyserver.ubuntu.com \
                          hkp://keyserver.ubuntu.com:80 \
                          pgp.mit.edu) ; do \
      gpg --batch --keyserver "$server" --recv-keys B42F6819007F00F88E364FD4036A9C25BF357DD4 && break || : ; \
  done && \
  gpg --batch --verify /usr/local/bin/gosu.asc /usr/local/bin/gosu; \
  gpgconf --kill all; \
  rm -rf "$GNUPGHOME" /usr/local/bin/gosu.asc; \
  chmod +x /usr/local/bin/gosu; \
  gosu nobody true

# Configure Flink version
ENV FLINK_TGZ_URL=https://www.apache.org/dyn/closer.cgi?action=download&filename=flink/flink-1.11.2/flink-1.11.2-bin-scala_2.11.tgz \
    FLINK_ASC_URL=https://www.apache.org/dist/flink/flink-1.11.2/flink-1.11.2-bin-scala_2.11.tgz.asc \
    GPG_KEY=C63E230EFFF519A5BBF2C9AE6767487CD505859C \
    CHECK_GPG=true

# Prepare environment
ENV FLINK_HOME=/opt/flink
ENV PATH=$FLINK_HOME/bin:$PATH
RUN groupadd --system --gid=9999 flink && \
    useradd --system --home-dir $FLINK_HOME --uid=9999 --gid=flink flink
WORKDIR $FLINK_HOME

# Install Flink
RUN set -ex; \
  wget -nv -O flink.tgz "$FLINK_TGZ_URL"; \
  \
  if [ "$CHECK_GPG" = "true" ]; then \
    wget -nv -O flink.tgz.asc "$FLINK_ASC_URL"; \
    export GNUPGHOME="$(mktemp -d)"; \
    for server in ha.pool.sks-keyservers.net $(shuf -e \
                            hkp://p80.pool.sks-keyservers.net:80 \
                            keyserver.ubuntu.com \
                            hkp://keyserver.ubuntu.com:80 \
                            pgp.mit.edu) ; do \
        gpg --batch --keyserver "$server" --recv-keys "$GPG_KEY" && break || : ; \
    done && \
    gpg --batch --verify flink.tgz.asc flink.tgz; \
    gpgconf --kill all; \
    rm -rf "$GNUPGHOME" flink.tgz.asc; \
  fi; \
  \
  tar -xf flink.tgz --strip-components=1; \
  rm flink.tgz; \
  \
  chown -R flink:flink .;

# Configure container
COPY docker-entrypoint.sh /
ENTRYPOINT ["/docker-entrypoint.sh"]
EXPOSE 6123 8081
CMD ["help"]

1.2 Build the Docker image

Build the Docker image locally and push it to the local registry at 192.168.32.14:

docker build -t flink:1.11.2 .
docker tag flink:1.11.2 192.168.32.14/library/flink:1.11.2
docker push 192.168.32.14/library/flink:1.11.2
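
Optionally, a quick sanity check of the new image: the default CMD is help, which exercises the first branch of docker-entrypoint.sh and prints the usage line.

# Runs the entrypoint's help branch and exits; confirms the modified
# docker-entrypoint.sh was copied into the image correctly.
docker run --rm 192.168.32.14/library/flink:1.11.2 help
# Expected output (from the script above):
# Usage: docker-entrypoint.sh (jobmanager|standalone-job|taskmanager|help)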

2 Create the namespace

Create flink-ha.yaml:

apiVersion: v1
kind: Namespace
metadata:
  name: flink-ha
  labels:
    name: flink-ha

kubectl apply -f flink-ha.yaml

3 Prepare the configuration files and shared storage

3.1 Create the storage PVCs

Create cluster-log.yaml:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: cluster-log
  namespace: flink-ha

spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 20Gi
  storageClassName: nfs

Create cluster-pvc.yaml:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: cluster-pvc
  namespace: flink-ha

spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 20Gi
  storageClassName: nfs

Create ha-jobmanager-storage-dir.yaml:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ha-jobmanager-storage-dir
  namespace: flink-ha

spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 20Gi
  storageClassName: nfs

Apply all three claims:

kubectl apply -f cluster-log.yaml
kubectl apply -f cluster-pvc.yaml
kubectl apply -f ha-jobmanager-storage-dir.yaml
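
Before moving on, it is worth checking that all three PVCs were bound by the nfs storage class; a pod mounting an unbound claim will stay in Pending.

# All three claims should report STATUS=Bound.
kubectl -n flink-ha get pvc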

3.2 Create the ConfigMaps

Create jobmanager-flink-conf.yaml for the JobManager configuration:

apiVersion: v1
data:
  flink-conf.yaml: |-
    jobmanager.rpc.address: localhost
    blob.server.port: 6124
    jobmanager.rpc.port: 6123
    taskmanager.rpc.port: 6122
    queryable-state.proxy.ports: 6125
    jobmanager.memory.process.size: 3200m
    taskmanager.memory.process.size: 10240m
    taskmanager.numberOfTaskSlots: 1
    parallelism.default: 1

    high-availability: zookeeper
    high-availability.cluster-id: /flink-cluster
    high-availability.storageDir: file:/usr/flink/ha/flink-cluster
    high-availability.zookeeper.quorum: 192.168.1.205:2181


    state.backend: filesystem
    state.checkpoints.dir: file:/usr/flink/flink-checkpoints
    state.checkpoints.num-retained: 100
    # state.savepoints.dir: hdfs://namenode-host:port/flink-checkpoints

    jobmanager.execution.failover-strategy: region

    web.upload.dir: /usr/flink/jars

    #metrics reporter
    metrics.reporter.promgateway.class: org.apache.flink.metrics.prometheus.PrometheusPushGatewayReporter
    metrics.reporter.promgateway.host: pushgateway
    metrics.reporter.promgateway.port: 9091
    metrics.reporter.promgateway.jobName: flink-cluster-job
    metrics.reporter.promgateway.randomJobNameSuffix: true
    metrics.reporter.promgateway.deleteOnShutdown: true
    metrics.reporter.promgateway.groupingKey: k1=v1;k2=v2
    metrics.reporter.promgateway.interval: 60 SECONDS
  log4j-console.properties: |-
    # This affects logging for both user code and Flink
    rootLogger.level = INFO
    rootLogger.appenderRef.console.ref = ConsoleAppender
    rootLogger.appenderRef.rolling.ref = RollingFileAppender

    # Uncomment this if you want to _only_ change Flink's logging
    #logger.flink.name = org.apache.flink
    #logger.flink.level = INFO

    # The following lines keep the log level of common libraries/connectors on
    # log level INFO. The root logger does not override this. You have to manually
    # change the log levels here.
    logger.akka.name = akka
    logger.akka.level = INFO
    logger.kafka.name= org.apache.kafka
    logger.kafka.level = INFO
    logger.hadoop.name = org.apache.hadoop
    logger.hadoop.level = INFO
    logger.zookeeper.name = org.apache.zookeeper
    logger.zookeeper.level = INFO

    # Log all infos to the console
    appender.console.name = ConsoleAppender
    appender.console.type = CONSOLE
    appender.console.layout.type = PatternLayout
    appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n

    # Log all infos in the given rolling file
    appender.rolling.name = RollingFileAppender
    appender.rolling.type = RollingFile
    appender.rolling.append = false
    appender.rolling.fileName = ${sys:log.file}
    appender.rolling.filePattern = ${sys:log.file}.%i
    appender.rolling.layout.type = PatternLayout
    appender.rolling.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
    appender.rolling.policies.type = Policies
    appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
    appender.rolling.policies.size.size=100MB
    appender.rolling.strategy.type = DefaultRolloverStrategy
    appender.rolling.strategy.max = 10

    # Suppress the irrelevant (wrong) warnings from the Netty channel handler
    logger.netty.name = org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline
    logger.netty.level = OFF
kind: ConfigMap
metadata:
  name: jobmanager-flink-conf
  namespace: flink-ha


Create taskmanager-flink-conf.yaml for the TaskManager configuration:

apiVersion: v1
data:
  flink-conf.yaml: |-
    jobmanager.rpc.address: flink-cluster
    blob.server.port: 6124
    jobmanager.rpc.port: 6123
    taskmanager.rpc.port: 6122
    queryable-state.proxy.ports: 6125
    jobmanager.memory.process.size: 3200m
    taskmanager.memory.process.size: 10240m
    taskmanager.numberOfTaskSlots: 1
    parallelism.default: 1

    high-availability: zookeeper
    high-availability.cluster-id: /flink-cluster
    high-availability.storageDir: file:/usr/flink/ha/flink-cluster
    high-availability.zookeeper.quorum: 192.168.1.205:2181

    state.backend: filesystem
    state.checkpoints.dir: file:/usr/flink/flink-checkpoints
    state.checkpoints.num-retained: 100
    # state.savepoints.dir: hdfs://namenode-host:port/flink-checkpoints

    jobmanager.execution.failover-strategy: region

    web.upload.dir: /usr/flink/jars

    #metrics reporter
    metrics.reporter.promgateway.class: org.apache.flink.metrics.prometheus.PrometheusPushGatewayReporter
    metrics.reporter.promgateway.host: pushgateway
    metrics.reporter.promgateway.port: 9091
    metrics.reporter.promgateway.jobName: flink-cluster-job
    metrics.reporter.promgateway.randomJobNameSuffix: true
    metrics.reporter.promgateway.deleteOnShutdown: true
    metrics.reporter.promgateway.groupingKey: k1=v1;k2=v2
    metrics.reporter.promgateway.interval: 60 SECONDS
  log4j-console.properties: |-
    # This affects logging for both user code and Flink
    rootLogger.level = INFO
    rootLogger.appenderRef.console.ref = ConsoleAppender
    rootLogger.appenderRef.rolling.ref = RollingFileAppender

    # Uncomment this if you want to _only_ change Flink's logging
    #logger.flink.name = org.apache.flink
    #logger.flink.level = INFO

    # The following lines keep the log level of common libraries/connectors on
    # log level INFO. The root logger does not override this. You have to manually
    # change the log levels here.
    logger.akka.name = akka
    logger.akka.level = INFO
    logger.kafka.name= org.apache.kafka
    logger.kafka.level = INFO
    logger.hadoop.name = org.apache.hadoop
    logger.hadoop.level = INFO
    logger.zookeeper.name = org.apache.zookeeper
    logger.zookeeper.level = INFO

    # Log all infos to the console
    appender.console.name = ConsoleAppender
    appender.console.type = CONSOLE
    appender.console.layout.type = PatternLayout
    appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n

    # Log all infos in the given rolling file
    appender.rolling.name = RollingFileAppender
    appender.rolling.type = RollingFile
    appender.rolling.append = false
    appender.rolling.fileName = ${sys:log.file}
    appender.rolling.filePattern = ${sys:log.file}.%i
    appender.rolling.layout.type = PatternLayout
    appender.rolling.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
    appender.rolling.policies.type = Policies
    appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
    appender.rolling.policies.size.size=100MB
    appender.rolling.strategy.type = DefaultRolloverStrategy
    appender.rolling.strategy.max = 10

    # Suppress the irrelevant (wrong) warnings from the Netty channel handler
    logger.netty.name = org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline
    logger.netty.level = OFF
kind: ConfigMap
metadata:
  name: taskmanager-flink-conf
  namespace: flink-ha

Apply both ConfigMaps:

kubectl apply -f jobmanager-flink-conf.yaml
kubectl apply -f taskmanager-flink-conf.yaml
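
To confirm the ConfigMap content rendered as expected, you can print the embedded flink-conf.yaml:

# Dots inside the data key name must be escaped with a backslash in jsonpath.
kubectl -n flink-ha get configmap jobmanager-flink-conf \
  -o jsonpath='{.data.flink-conf\.yaml}'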

3.3 Deploy pushgateway

pushgateway collects the metrics that Flink pushes and exposes them for Prometheus to scrape.
Create pushgateway.yaml:

apiVersion: apps/v1beta2
kind: Deployment
metadata:
  name: pushgateway
  namespace: flink-ha

spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      workload.user.cattle.io/workloadselector: deployment-flink-ha-pushgateway
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
    type: RollingUpdate
  template:
    metadata:
      labels:
        workload.user.cattle.io/workloadselector: deployment-flink-ha-pushgateway
    spec:
      containers:
      - image: 192.168.32.14/prihub/pushgateway:v1.2.0
        imagePullPolicy: Always
        name: pushgateway
        ports:
        - containerPort: 9091
          name: 9091tcp01
          protocol: TCP
        resources: {}
        securityContext:
          allowPrivilegeEscalation: false
          capabilities: {}
          privileged: false
          readOnlyRootFilesystem: false
          runAsNonRoot: false
        stdin: true
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        tty: true
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30

kubectl apply -f pushgateway.yaml
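
A quick way to confirm the pushgateway is serving before wiring anything to it (a sketch using a local port-forward):

# Forward local port 9091 to the pushgateway pod and fetch its metrics page
# (stop the port-forward with kill %1 when done).
kubectl -n flink-ha port-forward deploy/pushgateway 9091:9091 &
curl -s http://localhost:9091/metrics | head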

3.4 Set up ZooKeeper

ZooKeeper runs as a standalone instance on 192.168.1.205; the installation itself is out of scope here.

Note that the ZooKeeper deployment must match the Flink configuration files, i.e. high-availability.zookeeper.quorum above.
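
Once the cluster is up, leader election can be verified from the ZooKeeper side. A sketch, assuming the default high-availability.zookeeper.path.root of /flink (the znode path is path.root plus the configured high-availability.cluster-id):

# On the ZooKeeper host (interactive session):
bin/zkCli.sh -server 192.168.1.205:2181
# Then, inside the zkCli prompt -- with the section 3.2 settings the cluster's
# znodes live under <path.root>/<cluster-id> = /flink/flink-cluster:
ls /flink/flink-cluster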

4 Deploy the jobmanager, taskmanager, and pushgateway

4.1 pushgateway

pushgateway.yaml (the same Deployment created in section 3.3, shown here in full):

apiVersion: apps/v1beta2
kind: Deployment
metadata:
  labels:
    workload.user.cattle.io/workloadselector: deployment-flink-ha-pushgateway
  name: pushgateway
  namespace: flink-ha
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      workload.user.cattle.io/workloadselector: deployment-flink-ha-pushgateway
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
    type: RollingUpdate
  template:
    metadata:
      labels:
        workload.user.cattle.io/workloadselector: deployment-flink-ha-pushgateway
    spec:
      containers:
      - image: 192.168.32.14/prihub/pushgateway:v1.2.0
        imagePullPolicy: Always
        name: pushgateway
        ports:
        - containerPort: 9091
          name: 9091tcp01
          protocol: TCP
        resources: {}
        securityContext:
          allowPrivilegeEscalation: false
          capabilities: {}
          privileged: false
          readOnlyRootFilesystem: false
          runAsNonRoot: false
        stdin: true
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        tty: true
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30


Create pushgateway-nodeport.yaml for service discovery, so that Prometheus running in another namespace can reach the pushgateway:

apiVersion: v1
kind: Service
metadata:
  name: pushgateway-nodeport
  namespace: flink-ha
spec:
  externalTrafficPolicy: Cluster
  ports:
  - name: 9091tcp01
    nodePort: 31451
    port: 9091
    protocol: TCP
    targetPort: 9091
  selector:
    workload.user.cattle.io/workloadselector: deployment-flink-ha-pushgateway
  sessionAffinity: None
  type: NodePort
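
On the Prometheus side, a scrape job can then target any node IP plus the nodePort above. A hypothetical prometheus.yml fragment (192.168.32.23 stands in for one of your node IPs):

# Hypothetical prometheus.yml fragment -- adjust the node IP to your cluster.
scrape_configs:
  - job_name: 'flink-pushgateway'
    honor_labels: true          # keep the job/instance labels pushed by Flink
    static_configs:
      - targets: ['192.168.32.23:31451']   # <node-ip>:<nodePort from above>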


4.2 jobmanager cluster

A cluster of three JobManager nodes, defined as a StatefulSet.
jobmanager.yaml:

apiVersion: apps/v1beta2
kind: StatefulSet
metadata:
  name: flink-cluster
  namespace: flink-ha
spec:
  podManagementPolicy: OrderedReady
  replicas: 3
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: flink-cluster
  serviceName: flink-cluster
  template:
    metadata:
      labels:
        app: flink-cluster
    spec:
      containers:
      - args:
        - jobmanager
        env:
        - name: TASK_MANAGER_NUMBER_OF_TASK_SLOTS
          value: "1"
        - name: TZ
          value: Asia/Shanghai
        image: 192.168.32.14/library/flink:1.11.2
        imagePullPolicy: Always
        name: flink-cluster
        ports:
        - containerPort: 6124
          name: blob
          protocol: TCP
        - containerPort: 6125
          name: query
          protocol: TCP
        - containerPort: 8081
          name: flink-ui
          protocol: TCP
        resources: {}
        securityContext:
          capabilities: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        volumeMounts:
        - mountPath: /usr/flink
          name: vol1
        - mountPath: /opt/flink/conf/
          name: vol2
        - mountPath: /opt/flink/log
          name: vol3
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
      volumes:
      - name: vol1
        persistentVolumeClaim:
          claimName: cluster-pvc
      - configMap:
          defaultMode: 420
          items:
          - key: flink-conf.yaml
            mode: 420
            path: flink-conf.yaml
          - key: log4j-console.properties
            mode: 420
            path: log4j-console.properties
          name: jobmanager-flink-conf
          optional: false
        name: vol2
      - name: vol3
        persistentVolumeClaim:
          claimName: cluster-log
  updateStrategy:
    rollingUpdate:
      partition: 0
    type: RollingUpdate
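
Note that the StatefulSet's serviceName refers to a Service named flink-cluster, which is also what jobmanager.rpc.address points at in the TaskManager ConfigMap. The manifests above assume such a Service already exists (Rancher can generate one). If your environment does not have it, a minimal headless Service along these lines (my assumption, not taken from the running cluster) should work:

# Hypothetical flink-cluster service -- a minimal headless Service backing the
# StatefulSet's serviceName; skip this if Rancher already generated one.
apiVersion: v1
kind: Service
metadata:
  name: flink-cluster
  namespace: flink-ha
spec:
  clusterIP: None            # headless: stable per-pod DNS for the StatefulSet
  selector:
    app: flink-cluster
  ports:
  - name: rpc
    port: 6123
  - name: blob
    port: 6124
  - name: query
    port: 6125
  - name: flink-ui
    port: 8081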


4.3 taskmanager

taskmanager.yaml

apiVersion: apps/v1beta2
kind: Deployment
metadata:
  name: flink-taskmanager
  namespace: flink-ha
spec:
  progressDeadlineSeconds: 600
  replicas: 3
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: flink
      component: taskmanager
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: flink
        component: taskmanager
    spec:
      containers:
      - args:
        - taskmanager
        env:
        - name: TASK_MANAGER_NUMBER_OF_TASK_SLOTS
          value: "1"
        - name: TZ
          value: Asia/Shanghai
        image: 192.168.32.14/library/flink:1.11.2
        imagePullPolicy: IfNotPresent
        name: taskmanager
        ports:
        - containerPort: 6122
          name: rpc
          protocol: TCP
        - containerPort: 6125
          name: query-state
          protocol: TCP
        resources: {}
        securityContext:
          capabilities: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        volumeMounts:
        - mountPath: /usr/flink
          name: vol1
        - mountPath: /opt/flink/conf/
          name: vol2
        - mountPath: /opt/flink/log
          name: vol3
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
      volumes:
      - name: vol1
        persistentVolumeClaim:
          claimName: cluster-pvc
      - configMap:
          defaultMode: 420
          items:
          - key: flink-conf.yaml
            path: flink-conf.yaml
          - key: log4j-console.properties
            path: log4j-console.properties
          name: taskmanager-flink-conf
          optional: false
        name: vol2
      - name: vol3
        persistentVolumeClaim:
          claimName: cluster-log

Create an Ingress to load-balance the Flink web UI. Note that the serviceName below, ingress-3d1c0b13e898208ae1bee42c4910c1e6, is a service that Rancher generated in my environment; point the backend at whatever Service exposes port 8081 of the JobManager pods in your cluster.
flink-cluster-ingress.yaml:

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: flink-cluster-ingress
  namespace: flink-ha
spec:
  rules:
  - host: flink-cluster-ingress.flink-ha.192.168.32.23.xip.io
    http:
      paths:
      - backend:
          serviceName: ingress-3d1c0b13e898208ae1bee42c4910c1e6
          servicePort: 8081


Finally, apply everything:

kubectl apply -f pushgateway.yaml
kubectl apply -f pushgateway-nodeport.yaml
kubectl apply -f jobmanager.yaml
kubectl apply -f taskmanager.yaml
kubectl apply -f flink-cluster-ingress.yaml
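
Once everything is applied, verify that the pods come up and that one of the three JobManagers is elected leader (the exact log wording can vary by version):

# All flink-cluster-* (JobManager) and flink-taskmanager-* pods should be Running.
kubectl -n flink-ha get pods -o wide
# The elected JobManager logs a "granted leadership" message; check each replica:
kubectl -n flink-ha logs flink-cluster-0 | grep -i leadership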