The previous post covered how to set up a Kubernetes cluster with Rancher; this one continues with configuring and managing that cluster through Rancher.

Persistent Storage

Reference: the Rancher documentation on volumes and storage: https://docs.rancher.cn/docs/rancher2/cluster-admin/volumes-and-storage/_index/
NFS is used as the example here.

1 Install NFS v4

See "Installing and configuring NFS v4 on CentOS 7": https://blog.csdn.net/iceliooo/article/details/111592244
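
For quick reference, a minimal sketch of the server-side setup on CentOS 7, assuming the export path /data/nfs used throughout this post (see the linked article for the full walkthrough):

yum install -y nfs-utils
mkdir -p /data/nfs
# export to all clients; if you restrict clients by IP instead, every worker node
# must be allowed, otherwise mounts fail (see the FailedMount error further below)
echo '/data/nfs   *(rw,sync,no_root_squash)' >> /etc/exports
systemctl enable --now rpcbind nfs-server
exportfs -r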

2 Dynamically Provision Persistent Volumes

2.1 Deploy a third-party NFS StorageClass provisioner

See the Kubernetes documentation and the provisioner projects:
https://kubernetes.io/docs/concepts/storage/storage-classes/#parameters
https://github.com/kubernetes-sigs/sig-storage-lib-external-provisioner
https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner

Note that Rancher itself does not ship an NFS StorageClass provisioner, so one has to be deployed separately.

2.1.1 Deploy without Helm
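
Before the manual steps, note that the nfs-subdir-external-provisioner repository also publishes a Helm chart. A hedged sketch of that alternative (same NFS server and path as the manifests below, IP elided as in the original; the chart also manages a StorageClass of its own by default):

helm repo add nfs-subdir-external-provisioner https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/
helm install nfs-subdir-external-provisioner nfs-subdir-external-provisioner/nfs-subdir-external-provisioner \
  --namespace middleware \
  --set nfs.server=172.18.30.x \
  --set nfs.path=/data/nfs

The rest of this section sticks to plain manifests.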

1. Write the YAML files, following the upstream project

[root@uat-master nfs-subdir-external-provisioner]# vim rbac.yaml

apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  namespace: middleware
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: middleware
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  namespace: middleware
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  namespace: middleware
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: middleware
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
[root@uat-master nfs-subdir-external-provisioner]# vim deployment.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  namespace: middleware
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: quay.io/external_storage/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs
            - name: NFS_SERVER
              value: 172.18.30.x
            - name: NFS_PATH
              value: /data/nfs
      volumes:
        - name: nfs-client-root
          nfs:
            server: 172.18.30.x
            path: /data/nfs
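
One prerequisite worth noting: the kubelet on each worker node performs the NFS mount for the nfs-client-root volume above, so the NFS client utilities must be installed on every node (CentOS 7 package name; adjust for your distribution):

yum install -y nfs-utils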
2. Deploy the manifests

Create a project named uat and, under it, the namespace middleware.

Apply rbac.yaml:

[root@uat-master nfs-subdir-external-provisioner]# kubectl create -f rbac.yaml 
serviceaccount/nfs-client-provisioner created
clusterrole.rbac.authorization.k8s.io/nfs-client-provisioner-runner created
clusterrolebinding.rbac.authorization.k8s.io/run-nfs-client-provisioner created
role.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner created
rolebinding.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner created
[root@uat-master nfs-subdir-external-provisioner]# 

Apply deployment.yaml, either through the Rancher UI or with kubectl:

[root@uat-master nfs-subdir-external-provisioner]# kubectl create -f deployment.yaml 
deployment.apps/nfs-client-provisioner created
[root@uat-master nfs-subdir-external-provisioner]# 


The pod fails to start:

[root@uat-master nfs-subdir-external-provisioner]# kubectl describe pod nfs-client-provisioner-6f79df9fcc-4668b -n middleware
Name:           nfs-client-provisioner-6f79df9fcc-4668b
Namespace:      middleware
Priority:       0
Node:           uat-w2/172.18.30.214
Start Time:     Thu, 24 Dec 2020 11:30:18 +0800
Labels:         app=nfs-client-provisioner
                pod-template-hash=6f79df9fcc
······
Events:
  Type     Reason       Age               From               Message
  ----     ------       ----              ----               -------
  Normal   Scheduled    <unknown>         default-scheduler  Successfully assigned middleware/nfs-client-provisioner-6f79df9fcc-4668b to uat-w2
  Warning  FailedMount  56s (x8 over 2m)  kubelet, uat-w2    MountVolume.SetUp failed for volume "nfs-client-root" : mount failed: exit status 32
Mounting command: mount
Mounting arguments: -t nfs 172.18.30.214:/data/nfs /var/lib/kubelet/pods/3325beb4-909d-4fbf-be15-54c4903ebda8/volumes/kubernetes.io~nfs/nfs-client-root
Output: mount.nfs: access denied by server while mounting 172.18.30.214:/data/nfs
[root@uat-master nfs-subdir-external-provisioner]#

This is an NFS access-control problem; one fix is to drop the client IP restriction on the NFS export:

[root@uat-w2 nfs]# vi /etc/exports
[root@uat-w2 nfs]# cat /etc/exports
/data/nfs   *(rw,sync,no_root_squash)
[root@uat-w2 nfs]# exportfs -r
[root@uat-w2 nfs]# 
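
You can confirm from a worker node that the export is now visible before the pod retries the mount (IP elided as above):

showmount -e 172.18.30.x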

nfs-client-provisioner is now running successfully.

2.2 Configure the StorageClass

2.2.1 Deploy the StorageClass

Create class.yaml:

[root@uat-master nfs-subdir-external-provisioner]# vim class.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
provisioner: fuseim.pri/ifs # or choose another name; it must match the deployment's PROVISIONER_NAME env value
parameters:
  archiveOnDelete: "false"

Apply class.yaml:

[root@uat-master nfs-subdir-external-provisioner]# kubectl create -f class.yaml 
storageclass.storage.k8s.io/managed-nfs-storage created
[root@uat-master nfs-subdir-external-provisioner]# 

The StorageClass is created successfully.
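
Optionally, if PVCs that omit storageClassName should fall back to this class, mark it as the cluster default with the standard Kubernetes annotation (not required for the test below):

kubectl patch storageclass managed-nfs-storage \
  -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "true"}}}'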

2.2.2 Verify

Create test-claim.yaml and test-pod.yaml:

[root@uat-master nfs-subdir-external-provisioner]# vim test-claim.yaml

kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
  annotations:
    volume.beta.kubernetes.io/storage-class: "managed-nfs-storage"
spec:
  storageClassName: managed-nfs-storage
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi

[root@uat-master nfs-subdir-external-provisioner]# vim test-pod.yaml

kind: Pod
apiVersion: v1
metadata:
  name: test-pod
spec:
  containers:
  - name: test-pod
    image: busybox:1.24
    command:
      - "/bin/sh"
    args:
      - "-c"
      - "touch /mnt/SUCCESS && exit 0 || exit 1"
    volumeMounts:
      - name: nfs-pvc
        mountPath: "/mnt"
  restartPolicy: "Never"
  volumes:
    - name: nfs-pvc
      persistentVolumeClaim:
        claimName: test-claim

Apply them:

[root@uat-master nfs-subdir-external-provisioner]# kubectl create -f test-claim.yaml 
persistentvolumeclaim/test-claim created
[root@uat-master nfs-subdir-external-provisioner]# kubectl create -f test-pod.yaml 
pod/test-pod created
[root@uat-master nfs-subdir-external-provisioner]# 

The PVC and PV are created successfully.
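
The same can be checked from the command line instead of the Rancher UI; the claim should be Bound, with a dynamically provisioned PV behind it:

kubectl get pvc test-claim
kubectl get pv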

On the NFS server, the file written by test-pod appears in the dynamically provisioned directory:

[root@uat-w2 nfs]# ll
total 8
drwxrwxrwx 2 root root 4096 Dec 24 13:53 default-test-claim-pvc-9ab67258-5888-4721-b5ce-87288641bb1f
-rw-r--r-- 1 root root    7 Dec 23 17:07 hello.txt
[root@uat-w2 nfs]# cd default-test-claim-pvc-9ab67258-5888-4721-b5ce-87288641bb1f/
[root@uat-w2 default-test-claim-pvc-9ab67258-5888-4721-b5ce-87288641bb1f]# ll
total 0
-rw-r--r-- 1 root root 0 Dec 24 14:03 SUCCESS

Clean up the test resources:

[root@uat-master nfs-subdir-external-provisioner]# kubectl delete -f test-claim.yaml -f test-pod.yaml 
persistentvolumeclaim "test-claim" deleted
pod "test-pod" deleted
[root@uat-master nfs-subdir-external-provisioner]# 

3 Deploy a ZooKeeper Cluster

3.1 Deploy the ZooKeeper ConfigMap

Create the ConfigMap manifest:

[root@uat-master zk]# vim zk-cm.yaml

apiVersion: v1
kind: ConfigMap
metadata:
  name: zk-cm
  namespace: middleware
data:
  client.cnxns: "60"
  init: "10"
  jvm.heap: 1G
  purge.interval: "0"
  snap.retain: "3"
  sync: "5"
  tick: "2000"

Apply it:

[root@uat-master zk]# kubectl create -f zk-cm.yaml 
configmap/zk-cm created
[root@uat-master zk]# 

Deployment succeeds; the same ConfigMap can also be created through the Rancher UI.
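
How these keys reach ZooKeeper depends on how the workload is wired up in the next step (here it is configured through the Rancher UI). Purely as an illustration of the configMapKeyRef mechanism, a hypothetical container fragment that maps one key to an environment variable (the variable name ZK_HEAP_SIZE is an assumption):

# hypothetical fragment, not the actual workload spec
containers:
  - name: zookeeper
    image: zookeeper            # image assumed
    env:
      - name: ZK_HEAP_SIZE      # illustrative variable name
        valueFrom:
          configMapKeyRef:
            name: zk-cm
            key: jvm.heap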

3.2 Deploy ZooKeeper

  • Deploy the ZooKeeper workload

In the Volumes section, choose to add a PVC template so that each replica gets its own claim (an equivalent StatefulSet fragment is sketched below).


The workload and its PVCs are created successfully.
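
Judging by the PVC names (data-zookeeper-0/1/2 in the middleware namespace), the Rancher form corresponds to a StatefulSet named zookeeper with a volume claim template named data. A hedged sketch of the relevant fragment (image, size, and labels are assumptions; the actual workload was created in the UI):

apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zookeeper
  namespace: middleware
spec:
  serviceName: zookeeper          # headless service behind the per-pod DNS names used later in zoo.cfg
  replicas: 3
  selector:
    matchLabels:
      app: zookeeper
  template:
    metadata:
      labels:
        app: zookeeper
    spec:
      containers:
        - name: zookeeper
          image: zookeeper        # image assumed
          volumeMounts:
            - name: data
              mountPath: /data
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        accessModes: ["ReadWriteOnce"]
        storageClassName: managed-nfs-storage
        resources:
          requests:
            storage: 1Gi          # size assumed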

The corresponding PV directories are also created automatically on the NFS server:

[root@uat-w2 nfs]# ll
total 16
-rw-r--r-- 1 root root    7 Dec 23 17:07 hello.txt
drwxrwxrwx 2 root root 4096 Dec 25 16:59 middleware-data-zookeeper-0-pvc-db84b366-352b-4402-a759-8345feef65af
drwxrwxrwx 2 root root 4096 Dec 25 16:59 middleware-data-zookeeper-1-pvc-3437ff3e-9965-4387-8949-d3d6ba7c275e
drwxrwxrwx 2 root root 4096 Dec 25 17:00 middleware-data-zookeeper-2-pvc-ebe6eda8-217c-42ef-bf80-f9652926dbcc
[root@uat-w2 nfs]# 
  • Verify

Create a znode /hello with the value world on zookeeper-0. zookeeper-1 cannot read it, which suggests the three nodes are not actually running as one ensemble:

[root@uat-master ~]# kubectl exec zookeeper-0 -n middleware  zkCli.sh create /hello world
Connecting to localhost:2181
log4j:WARN No appenders could be found for logger (org.apache.zookeeper.ZooKeeper).
log4j:WARN Please initialize the log4j system properly.
log4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.

WATCHER::

WatchedEvent state:SyncConnected type:None path:null
Created /hello
[root@uat-master ~]# kubectl exec zookeeper-1 -n middleware  zkCli.sh get /hello
Connecting to localhost:2181
log4j:WARN No appenders could be found for logger (org.apache.zookeeper.ZooKeeper).
log4j:WARN Please initialize the log4j system properly.
log4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.

WATCHER::

WatchedEvent state:SyncConnected type:None path:null
Node does not exist: /hello
[root@uat-master ~]# 

  • Troubleshooting

Looking at a node's configuration file, no ensemble (cluster) members are configured:

[root@uat-w2 conf]# cat /data/nfs/middleware-data-zookeeper-1-pvc-f3d6edd7-baa1-47d2-8842-ddb74c50b895/conf/zoo.cfg 
clientPort=2181
dataDir=/data
dataLogDir=/datalog
tickTime=2000
initLimit=5
syncLimit=2
maxClientCnxns=60
[root@uat-w2 conf]# 

1. Edit zoo.cfg (do this on all three nodes):

[root@uat-w2 conf]# vim zoo.cfg 

clientPort=2181
dataDir=/data
dataLogDir=/datalog
tickTime=2000
initLimit=10
syncLimit=2000
maxClientCnxns=60
minSessionTimeout= 4000
maxSessionTimeout= 40000
autopurge.snapRetainCount=3
autopurge.purgeInterval=0
server.1=zookeeper-0.zookeeper.middleware.svc.cluster.local:2888:3888
server.2=zookeeper-1.zookeeper.middleware.svc.cluster.local:2888:3888
server.3=zookeeper-2.zookeeper.middleware.svc.cluster.local:2888:3888

2. Edit the myid file on each node so its value matches the corresponding server.X entry in zoo.cfg (zookeeper-0 keeps myid 1):

# on zookeeper-1, change the myid value to 2
[root@uat-w2 data]# vim myid 
2
# on zookeeper-2, change the myid value to 3
[root@uat-w2 data]# vim myid 
3

3. Delete the pods so they restart with the new configuration.
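Deleting them in the Rancher UI or with kubectl is equivalent; the StatefulSet recreates the pods, which pick up the edited zoo.cfg and myid from their persistent volumes:

kubectl delete pod zookeeper-0 zookeeper-1 zookeeper-2 -n middleware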
4. Verify again; this time it succeeds:

[root@uat-master ~]# kubectl exec zookeeper-0 -n middleware  zkCli.sh create /hello world
Connecting to localhost:2181
log4j:WARN No appenders could be found for logger (org.apache.zookeeper.ZooKeeper).
log4j:WARN Please initialize the log4j system properly.
log4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.

WATCHER::

WatchedEvent state:SyncConnected type:None path:null
Created /hello
[root@uat-master ~]# kubectl exec zookeeper-1 -n middleware  zkCli.sh get /hello
Connecting to localhost:2181
log4j:WARN No appenders could be found for logger (org.apache.zookeeper.ZooKeeper).
log4j:WARN Please initialize the log4j system properly.
log4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.

WATCHER::

WatchedEvent state:SyncConnected type:None path:null
cZxid = 0x100000003
world
ctime = Sat Jan 02 13:56:52 GMT 2021
mZxid = 0x100000003
mtime = Sat Jan 02 13:56:52 GMT 2021
pZxid = 0x100000003
cversion = 0
dataVersion = 0
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 5
numChildren = 0
[root@uat-master ~]# 

The node roles also look correct (one leader, two followers):

[root@uat-master ~]# kubectl exec zookeeper-0 -n middleware  zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /conf/zoo.cfg
Mode: follower
[root@uat-master ~]# kubectl exec zookeeper-1 -n middleware  zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /conf/zoo.cfg
Mode: follower
[root@uat-master ~]# kubectl exec zookeeper-2 -n middleware  zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /conf/zoo.cfg
Mode: leader
[root@uat-master ~]# 
