一、创建NFS动态卷持久化存储

1、创建NFS服务端

[root@work03 ~]# yum install nfs-utils rpcbind -y
[root@work03 ~]# systemctl start nfs
[root@work03 ~]# systemctl start rpcbind
[root@work03 ~]# systemctl enable nfs
[root@work03 ~]# systemctl enable rpcbind
[root@work03 ~]# mkdir -p /data/redis/
[root@work03 ~]# vim /etc/exports
/data/redis    *(rw,sync,no_root_squash,no_all_squash)
[root@work03 ~]# systemctl restart rpcbind
[root@work03 ~]# systemctl restart nfs
[root@work03 ~]# showmount -e localhost
Export list for localhost:
/data/redis *

2、创建动态卷提供者
(1)创建serviceaccount

vim serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  # Name and namespace must match the account referenced by the RBAC
  # bindings and by the provisioner Deployment (nfs-client-provisioner
  # in kube-system); the original name 'nfs-provisioner' was never
  # referenced anywhere else in this walkthrough.
  name: nfs-client-provisioner
  namespace: kube-system
kubectl apply -f serviceaccount.yaml

(2)创建service-rbac

vim service-rbac.yaml
# ServiceAccount used by the nfs-client-provisioner Deployment.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: kube-system
---
# Cluster-wide permissions: the provisioner creates/deletes PVs,
# watches PVCs and StorageClasses, and records Events.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
# Bind the ClusterRole above to the provisioner's ServiceAccount.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
# Namespaced Role for leader election: the provisioner uses an
# Endpoints object as its leader-election lock.
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: kube-system
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
# Bind the leader-election Role to the provisioner's ServiceAccount.
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: kube-system
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: kube-system
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
kubectl apply -f service-rbac.yaml

(3)创建nfs-provisioner-deploy

vim nfs-provisioner-deploy.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
  name: redis-nfs
  namespace: kube-system
spec:
  selector:
    matchLabels:
      app: redis-nfs
  replicas: 1
  strategy:
    # Recreate avoids two provisioner instances running concurrently
    # during a rollout.
    type: Recreate
  template:
    metadata:
      labels:
        app: redis-nfs
    spec:
      # 'serviceAccount' is a deprecated alias; the supported field is
      # serviceAccountName. Must match the ServiceAccount created by
      # service-rbac.yaml.
      serviceAccountName: nfs-client-provisioner
      imagePullSecrets:
      - name: regcred
      containers:
        - name: redis-nfs
          image: 192.168.0.107:80/heosun/nfs-client-provisioner:v1.0
          volumeMounts:
            - name: redis-nfs-root
              mountPath: /persistentvolumes
          env:
            # Must match the 'provisioner' field of the StorageClass.
            - name: PROVISIONER_NAME
              value: heosun/redis
            # NFS server and export that back the dynamically
            # provisioned volumes.
            - name: NFS_SERVER
              value: 192.168.0.108
            - name: NFS_PATH
              value: /data/redis
      volumes:
        - name: redis-nfs-root
          nfs:
            server: 192.168.0.108
            path: /data/redis
kubectl apply -f nfs-provisioner-deploy.yaml

(4)创建storageclass

vim storageclass.yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: redis-nfs
# Must match the PROVISIONER_NAME env var of the nfs-client-provisioner
# Deployment; PVCs requesting this class are served by that provisioner.
provisioner: heosun/redis
kubectl apply -f storageclass.yaml

二、部署redis集群

1、创建configmap

vim redis-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: redis-cluster
# NOTE: keep comments outside the block scalars below; a '#' line inside
# a '|' literal would become part of the stored file content.
data:
  # Entrypoint wrapper: rewrites this pod's current IP on the 'myself'
  # line of nodes.conf (pod IPs change across restarts), then execs the
  # real command (redis-server).
  update-node.sh: |
    #!/bin/sh
    REDIS_NODES="/data/nodes.conf"
    sed -i -e "/myself/ s/[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}/${POD_IP}/" ${REDIS_NODES}
    exec "$@"
  # Cluster-mode redis configuration shared by every pod.
  redis.conf: |+
    cluster-enabled yes
    cluster-require-full-coverage no
    cluster-node-timeout 15000
    cluster-config-file /data/nodes.conf
    cluster-migration-barrier 1
    appendonly yes
    protected-mode no
kubectl apply -f redis-configmap.yaml

2、创建redis-service

vim redis-service.yaml
# Governing Service for the StatefulSet. clusterIP: None (headless) is
# required so each pod gets a stable per-pod DNS record
# (redis-cluster-0.redis-cluster.default.svc.cluster.local), which the
# cluster-initialization commands later in this document rely on; a
# normal ClusterIP Service does not publish those records.
apiVersion: v1
kind: Service
metadata:
  name: redis-cluster
spec:
  type: ClusterIP
  clusterIP: None
  ports:
  - port: 6379
    targetPort: 6379
    name: client
  - port: 16379
    targetPort: 16379
    name: gossip
  selector:
    app: redis-cluster
---
# NodePort Service for clients outside the cluster.
apiVersion: v1
kind: Service
metadata:
  name: redis-access-service
  labels:
    app: redis
spec:
  # NOTE(review): hard-coded clusterIP is environment-specific; drop this
  # line unless the address is known to be free in your service CIDR.
  clusterIP: 10.98.220.182
  type: NodePort
  ports:
  - name: redis-port
    protocol: "TCP"
    port: 6379
    targetPort: 6379
    nodePort: 30009
  selector:
    app: redis-cluster
kubectl apply -f redis-service.yaml

3、创建serviceaccount

vim serviceaccount.yaml
# ServiceAccount under which the redis pods run.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: redis
  labels:
    app: redis

---
# Minimal Role: read-only access to Endpoints in this namespace.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: redis
  labels:
    app: redis
rules:
- apiGroups:
    - ""
  resources:
    - endpoints
  verbs:
    - get

---
# Grant the Role above to the redis ServiceAccount.
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: redis
  labels:
    app: redis
subjects:
- kind: ServiceAccount
  name: redis
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: redis
kubectl apply -f serviceaccount.yaml

4、创建redis-statefulset

vim redis-statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: redis-cluster
spec:
  # Per-pod DNS records (redis-cluster-N.redis-cluster...) are published
  # under this governing Service.
  serviceName: redis-cluster
  # 6 pods: 3 masters + 3 replicas after cluster initialization.
  replicas: 6
  selector:
    matchLabels:
      app: redis-cluster
  template:
    metadata:
      labels:
        app: redis-cluster
    spec:
      # Run under the 'redis' ServiceAccount created in step 3; without
      # this the pods use the namespace default account and the RBAC
      # objects created above are never used.
      serviceAccountName: redis
      containers:
      - name: redis
        image: redis:5.0.1-alpine
        ports:
        - containerPort: 6379
          name: client
        - containerPort: 16379
          name: gossip
        # update-node.sh patches this pod's own IP into nodes.conf, then
        # execs redis-server, so the cluster survives pod IP changes.
        command: ["/conf/update-node.sh", "redis-server", "/conf/redis.conf"]
        env:
        - name: POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        volumeMounts:
        - name: conf
          mountPath: /conf
          readOnly: false
        - name: data
          mountPath: /data
          readOnly: false
      volumes:
      - name: conf
        configMap:
          name: redis-cluster
          # 0755 so update-node.sh is executable.
          defaultMode: 0755
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      # The volume.beta.kubernetes.io/storage-class annotation is
      # deprecated; the supported spelling is the storageClassName field.
      storageClassName: redis-nfs
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 1Gi
kubectl apply -f redis-statefulset.yaml

5、使用 busybox 镜像的 nslookup 检验域名

kubectl run -i --tty --image busybox dns-test --restart=Never --rm /bin/sh
nslookup 10.244.1.162

6、初始化 Redis 集群

kubectl run -i --tty ubuntu --image=ubuntu --restart=Always /bin/bash

在新启的 ubuntu 容器中添加阿里源

cat > /etc/apt/sources.list << EOF
deb http://mirrors.aliyun.com/ubuntu/ bionic main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ bionic main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ bionic-security main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ bionic-security main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ bionic-updates main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ bionic-updates main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ bionic-proposed main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ bionic-proposed main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ bionic-backports main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ bionic-backports main restricted universe multiverse
EOF

更新 apt 并安装相关软件与 redis-trib

apt-get update
apt-get install -y vim wget python2.7 python-pip redis-tools dnsutils
pip install redis-trib

建立 Master 集群

redis-trib.py create  redis-cluster-0.redis-cluster.default.svc.cluster.local:6379  redis-cluster-1.redis-cluster.default.svc.cluster.local:6379   redis-cluster-2.redis-cluster.default.svc.cluster.local:6379

成功结果展示:

Redis-trib 0.6.2 Copyright (c) HunanTV Platform developers
INFO:root:Instance at 10.244.0.98:6379 checked
INFO:root:Instance at 10.244.1.163:6379 checked
INFO:root:Instance at 10.244.1.162:6379 checked
INFO:root:Add 5462 slots to 10.244.0.98:6379
INFO:root:Add 5461 slots to 10.244.1.163:6379
INFO:root:Add 5461 slots to 10.244.1.162:6379

如果报错,可将 redis-cluster-2.redis-cluster.default.svc.cluster.local 等域名更换为对应的 Pod IP
为各个 Master 添加 Slave

redis-trib.py replicate --master-addr redis-cluster-0.redis-cluster.default.svc.cluster.local:6379 --slave-addr redis-cluster-3.redis-cluster.default.svc.cluster.local:6379

成功结果展示:

Redis-trib 0.6.2 Copyright (c) HunanTV Platform developers
INFO:root:Instance at 10.244.0.99:6379 has joined 10.244.1.162:6379; now set replica
INFO:root:Instance at 10.244.0.99:6379 set as replica to 966fee4175b3d16f810faa30a6c5ceecb8da4e43

进入任意 redis 容器 (内部测试)

kubectl exec -it redis-0 -- bash
root@redis-0:/data# redis-cli -c
127.0.0.1:6379> set aa 666
-> Redirected to slot [1180] located at 10.244.0.98:6379
OK

10.244.0.98:6379> get aa
"666"

10.244.0.98:6379> role
1) "master"
2) (integer) 403
3) 1) 1) "10.244.1.164"
      2) "6379"
      3) "403"

redis-cli -c 该命令表示以集群模式进入 redis

Logo

K8S/Kubernetes社区为您提供最前沿的新闻资讯和知识内容

更多推荐