Deploying a ZooKeeper Cluster on k8s
Install NFS (required on all nodes)
yum install nfs-utils -y
vim /etc/exports
/root/news/nfs *(insecure,rw,sync,no_root_squash)
# *: any host may connect; this can be narrowed, e.g. 10.0.0.0/16 to allow only that subnet
# insecure: allow client source ports outside the privileged range (0,1024]
# sync: write data to the share synchronously on each request
# no_root_squash: the root user keeps full root privileges on the share
systemctl restart nfs-server
systemctl enable nfs-server
# View the exported directories
showmount -e
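The PV manifests in the next section mount subdirectories of this export, so those directories must exist on the NFS server before the PVs are created. A minimal sketch (the pv0001-pv0003 paths match the PV specs below):

mkdir -p /root/news/nfs/pv0001 /root/news/nfs/pv0002 /root/news/nfs/pv0003
exportfs -r               # re-read /etc/exports without restarting the service
showmount -e localhost    # confirm /root/news/nfs is exported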
Create the PersistentVolumes
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv0001
spec:
  capacity:
    storage: 11Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteMany
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Recycle
  mountOptions:
    - hard
    - nfsvers=3
  nfs:
    path: /root/news/nfs/pv0001
    server: 10.0.0.1
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv0002
spec:
  capacity:
    storage: 11Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteMany
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Recycle
  mountOptions:
    - hard
    - nfsvers=3
  nfs:
    path: /root/news/nfs/pv0002
    server: 10.0.0.1
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv0003
spec:
  capacity:
    storage: 11Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Recycle
  mountOptions:
    - hard
    - nfsvers=3
  nfs:
    path: /root/news/nfs/pv0003
    server: 10.0.0.1
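Assuming the three manifests above are saved in a file such as zk-pv.yaml (the filename is illustrative), apply and verify them. All three PVs should show as Available until the StatefulSet's volumeClaimTemplates bind them:

kubectl apply -f zk-pv.yaml
kubectl get pv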
Deploy ZooKeeper
The configuration below has been modified: specifically, ZooKeeper runs on Ubuntu with the corresponding directories mounted, and the kubernetes-zookeeper image has also been patched to avoid the UnknownHostException error.
# Remember to wipe the old data each time a PV is rebound (see the sketch below)
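One way to do that on the NFS server, assuming the export paths from the PV section. This deletes all ZooKeeper data, so only run it when rebuilding the cluster from scratch:

rm -rf /root/news/nfs/pv0001/* /root/news/nfs/pv0002/* /root/news/nfs/pv0003/*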
apiVersion: v1
kind: Service
metadata:
  name: zk-hs
  labels:
    app: zk
spec:
  ports:
    - port: 2888
      name: server
    - port: 3888
      name: leader-election
  clusterIP: None
  selector:
    app: zk
---
apiVersion: v1
kind: Service
metadata:
  name: zk-cs
  labels:
    app: zk
spec:
  type: NodePort
  ports:
    - port: 2181
      targetPort: 2181
      nodePort: 30012
      name: client
  selector:
    app: zk
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: zk-pdb
spec:
  selector:
    matchLabels:
      app: zk
  maxUnavailable: 1
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zk
spec:
  selector:
    matchLabels:
      app: zk
  serviceName: zk-hs
  replicas: 3
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: OrderedReady
  template:
    metadata:
      labels:
        app: zk
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: "app"
                    operator: In
                    values:
                      - zk
              topologyKey: "kubernetes.io/hostname"
      containers:
        - name: kubernetes-zookeeper
          imagePullPolicy: IfNotPresent
          # image: leolee32/kubernetes-library:kubernetes-zookeeper1.0-3.4.10
          image: "yuanxi2314/kubernetes-zookeeper1.0-3.4.10:v8.0"
          resources:
            requests:
              memory: "1Gi"
              cpu: "0.5"
          ports:
            - containerPort: 2181
              name: client
            - containerPort: 2888
              name: server
            - containerPort: 3888
              name: leader-election
          command:
            - sh
            - -c
            - "start-zookeeper \
              --servers=3 \
              --data_dir=/var/lib/zookeeper/data \
              --data_log_dir=/var/lib/zookeeper/data/log \
              --conf_dir=/opt/zookeeper/conf \
              --client_port=2181 \
              --election_port=3888 \
              --server_port=2888 \
              --tick_time=2000 \
              --init_limit=10 \
              --sync_limit=5 \
              --heap=512M \
              --max_client_cnxns=60 \
              --snap_retain_count=3 \
              --purge_interval=12 \
              --max_session_timeout=40000 \
              --min_session_timeout=4000 \
              --log_level=INFO"
          readinessProbe:
            exec:
              command:
                - sh
                - -c
                - "zookeeper-ready 2181"
            initialDelaySeconds: 10
            timeoutSeconds: 5
          livenessProbe:
            exec:
              command:
                - sh
                - -c
                - "zookeeper-ready 2181"
            initialDelaySeconds: 10
            timeoutSeconds: 5
          volumeMounts:
            - name: datadir
              mountPath: /var/lib/zookeeper
      # securityContext:
      #   runAsUser: 0
      #   fsGroup: 0
  volumeClaimTemplates:
    - metadata:
        name: datadir
      spec:
        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            storage: 10Gi
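Assuming the manifests above are saved in a file such as zk.yaml (the filename is illustrative), deploy and check the ensemble. The checks below follow the upstream Kubernetes ZooKeeper tutorial and assume the patched image keeps the same layout as the original:

kubectl apply -f zk.yaml
kubectl get pods -w -l app=zk      # wait until zk-0, zk-1 and zk-2 are Running
# each server should have a unique myid (1, 2, 3)
for i in 0 1 2; do kubectl exec zk-$i -- cat /var/lib/zookeeper/data/myid; done
# smoke-test a write through the client port
kubectl exec zk-0 -- zkCli.sh create /hello world

Clients outside the cluster can reach the ensemble through the zk-cs NodePort service, e.g. at <any-node-ip>:30012.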