Flink 1.10: single-node k8s installation
1. Pull the official image on a machine that has Docker
[root@k8smaster01 docker]# docker pull flink
Using default tag: latest
latest: Pulling from library/flink
Digest: sha256:94906bb9fa87da1ca8503bf47ec706d07ea934ae1ca72814846956c4fca1877e
Status: Image is up to date for flink:latest
docker.io/library/flink:latest
[root@k8smaster01 docker]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
flink latest 8f08cde24fce 2 weeks ago 585MB
Save the image: docker save flink:latest -o ./flink.tar
Copy the archive to the k8s environment:
[root@k8smaster01 docker]# scp flink.tar root@192.168.X.X:/opt/test/test
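Optionally, verify the transfer before loading the image on the other side; a minimal sketch using sha256sum (any checksum tool works):
sha256sum flink.tar                    # on the Docker host
sha256sum /opt/test/test/flink.tar     # on the k8s node; the two hashes should match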
Create the five YAML files below (copied from the official docs, with minor adjustments).
[root@vcloud-single test]# pwd
/opt/test/test
flink-configuration-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: flink-config
  labels:
    app: flink
data:
  flink-conf.yaml: |+
    jobmanager.rpc.address: flink-jobmanager
    taskmanager.numberOfTaskSlots: 2
    blob.server.port: 6124
    jobmanager.rpc.port: 6123
    taskmanager.rpc.port: 6122
    jobmanager.heap.size: 1024m
    taskmanager.memory.process.size: 1024m
    state.backend: rocksdb
    state.checkpoints.dir: file:///opt/flink/checkpoint
    state.savepoints.dir: file:///opt/flink/savepoint
    web.tmpdir: /opt/flink/uploadjartmpdir
    web.upload.dir: /opt/flink/uploadjardir
  log4j-cli.properties: |+
    log4j.rootLogger=INFO, file
    log4j.logger.akka=INFO
    log4j.logger.org.apache.kafka=INFO
    log4j.logger.org.apache.hadoop=INFO
    log4j.logger.org.apache.zookeeper=INFO
    log4j.appender.file=org.apache.log4j.FileAppender
    log4j.appender.file.file=${log.file}
    log4j.appender.file.layout=org.apache.log4j.PatternLayout
    log4j.appender.file.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
    log4j.logger.org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline=ERROR, file
  log4j.properties: |+
    log4j.rootLogger=INFO, file
    log4j.logger.akka=INFO
    log4j.logger.org.apache.kafka=INFO
    log4j.logger.org.apache.hadoop=INFO
    log4j.logger.org.apache.zookeeper=INFO
    log4j.appender.file=org.apache.log4j.FileAppender
    log4j.appender.file.file=${log.file}
    log4j.appender.file.layout=org.apache.log4j.PatternLayout
    log4j.appender.file.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
    log4j.logger.org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline=ERROR, file
  # config.properties: |+
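The keys above are rendered as files under /opt/flink/conf by the Deployments below. Once the pods are running (after the apply steps further down), a quick sanity check is to read the rendered file back from the JobManager container; the pod name here is the one shown in the verification step, substitute your own:
kubectl exec -it flink-jobmanager-8b99df654-m6629 -- cat /opt/flink/conf/flink-conf.yaml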
jobmanager-deployment.yaml
Notes:
mountPath is the path inside the container
hostPath is the path on the host that the corresponding mountPath is mapped to
apiVersion: apps/v1
kind: Deployment
metadata:
  name: flink-jobmanager
spec:
  replicas: 1
  selector:
    matchLabels:
      app: flink
      component: jobmanager
  template:
    metadata:
      labels:
        app: flink
        component: jobmanager
    spec:
      containers:
      - name: jobmanager
        image: registry:5500/flink:latest
        workingDir: /opt/flink
        command: ["/bin/bash", "-c", "$FLINK_HOME/bin/jobmanager.sh start;\
          while :;
          do
            if [[ -f $(find log -name '*jobmanager*.log' -print -quit) ]];
              then tail -f -n +1 log/*jobmanager*.log;
            fi;
          done"]
        ports:
        - containerPort: 6123
          name: rpc
        - containerPort: 6124
          name: blob
        - containerPort: 8081
          name: ui
        livenessProbe:
          tcpSocket:
            port: 6123
          initialDelaySeconds: 30
          periodSeconds: 60
        volumeMounts:
        - name: flink-config-volume
          mountPath: /opt/flink/conf
        - name: flink-config-dir
          mountPath: /opt/flink/config
        - name: state-checkpoints-dir
          mountPath: /opt/flink/checkpoint
        - name: state-savepoints-dir
          mountPath: /opt/flink/savepoint
        - name: web-tmpdir
          mountPath: /opt/flink/uploadjartmpdir
        - name: web-upload-dir
          mountPath: /opt/flink/uploadjardir
      volumes:
      - name: flink-config-dir
        hostPath:
          path: "/opt/flink/config"
      - name: state-checkpoints-dir
        hostPath:
          path: "/opt/flink/checkpoint"
      - name: state-savepoints-dir
        hostPath:
          path: "/opt/flink/savepoint"
      - name: web-tmpdir
        hostPath:
          path: "/opt/flink/uploadjartmpdir"
      - name: web-upload-dir
        hostPath:
          path: "/opt/flink/uploadjardir"
      - name: flink-config-volume
        configMap:
          name: flink-config
          items:
          - key: flink-conf.yaml
            path: flink-conf.yaml
          - key: log4j.properties
            path: log4j.properties
          - key: log4j-cli.properties
            path: log4j-cli.properties
taskmanager-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: flink-taskmanager
spec:
  replicas: 2
  selector:
    matchLabels:
      app: flink
      component: taskmanager
  template:
    metadata:
      labels:
        app: flink
        component: taskmanager
    spec:
      containers:
      - name: taskmanager
        image: registry:5500/flink:latest
        workingDir: /opt/flink
        command: ["/bin/bash", "-c", "$FLINK_HOME/bin/taskmanager.sh start; \
          while :;
          do
            if [[ -f $(find log -name '*taskmanager*.log' -print -quit) ]];
              then tail -f -n +1 log/*taskmanager*.log;
            fi;
          done"]
        ports:
        - containerPort: 6122
          name: rpc
        livenessProbe:
          tcpSocket:
            port: 6122
          initialDelaySeconds: 30
          periodSeconds: 60
        volumeMounts:
        - name: flink-config-volume
          mountPath: /opt/flink/conf/
      volumes:
      - name: flink-config-volume
        configMap:
          name: flink-config
          items:
          - key: flink-conf.yaml
            path: flink-conf.yaml
          - key: log4j.properties
            path: log4j.properties
          - key: log4j-cli.properties
            path: log4j-cli.properties
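Because the TaskManager count is just this Deployment's replica count, capacity can be changed later without editing the YAML; each TaskManager contributes taskmanager.numberOfTaskSlots = 2 slots. For example:
kubectl scale deployment flink-taskmanager --replicas=3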
jobmanager-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: flink-jobmanager
spec:
  type: NodePort
  ports:
  - name: rpc
    port: 6123
  - name: blob
    port: 6124
  - name: ui
    port: 8081
    targetPort: 8081
    nodePort: 18598
  selector:
    app: flink
    component: jobmanager
jobmanager-rest-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: flink-jobmanager-rest
spec:
  type: NodePort
  ports:
  - name: rest
    port: 8081
    targetPort: 8081
  selector:
    app: flink
    component: jobmanager
Note: the following lines from the official manifests must be removed, otherwise jar uploads fail and job submission also errors out.
securityContext:
  runAsUser: 9999  # refers to user _flink_ from official flink image, change if necessary
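A possible alternative, not tested here: keep runAsUser: 9999 and instead make the hostPath directories writable by that UID, since the upload and checkpoint failures are typically permission errors on root-owned host directories:
chown -R 9999:9999 /opt/flink/checkpoint /opt/flink/savepoint \
                   /opt/flink/uploadjartmpdir /opt/flink/uploadjardir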
[root@vcloud-single test]# ls
flink-configuration-configmap.yaml jobmanager-deployment.yaml jobmanager-service.yaml repositories
flink.tar jobmanager-rest-service.yaml manifest.json taskmanager-deployment.yaml
Load the image:
docker load -i flink.tar
-- Retag the image with the remote registry address, then push it to the registry used by the k8s cluster
docker tag flink:latest registry:5500/flink:latest
docker push registry:5500/flink:latest
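Optionally confirm the push succeeded; assuming the registry exposes the standard Docker Registry v2 HTTP API over plain HTTP, the tag list should now include latest:
curl http://registry:5500/v2/flink/tags/list
# expected output, roughly: {"name":"flink","tags":["latest"]}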
-- Apply the YAML manifests
kubectl apply -f flink-configuration-configmap.yaml
kubectl apply -f jobmanager-service.yaml
kubectl apply -f jobmanager-deployment.yaml
kubectl apply -f taskmanager-deployment.yaml
kubectl apply -f jobmanager-rest-service.yaml
or
kubectl create -f flink-configuration-configmap.yaml
kubectl create -f jobmanager-service.yaml
kubectl create -f jobmanager-deployment.yaml
kubectl create -f taskmanager-deployment.yaml
kubectl create -f jobmanager-rest-service.yaml
-- Verify the installation
kubectl get pod |grep flink
kubectl logs -f flink-jobmanager-8b99df654-m6629
kubectl describe pod flink-jobmanager-8b99df654-m6629
Access the Flink web UI in a browser: {微云 node IP}:18598
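As a further smoke test, you can run the WordCount example bundled with the Flink distribution from inside the JobManager pod, and query the REST API on the same NodePort as the UI (pod name and node IP are placeholders, substitute your own):
kubectl exec -it flink-jobmanager-8b99df654-m6629 -- /opt/flink/bin/flink run /opt/flink/examples/streaming/WordCount.jar
curl http://{node IP}:18598/overview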
-- Uninstall the services
kubectl delete -f jobmanager-deployment.yaml
kubectl delete -f taskmanager-deployment.yaml
kubectl delete -f jobmanager-service.yaml
kubectl delete -f flink-configuration-configmap.yaml
kubectl delete -f jobmanager-rest-service.yaml