emptyDir volumes

emptyDir example
[root@server2 ~]# kubectl delete pod mypod --force 
[root@server2 ~]# mkdir volumes
[root@server2 ~]# cd volumes/
[root@server2 volumes]# vim emptydir.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: vol1
spec:
  containers:
  - image: busyboxplus
    name: vm1
    stdin: true
    tty: true
    volumeMounts:
    - mountPath: /cache
      name: cache-volume
  - name: vm2
    image: myapp:v1
    volumeMounts:
    - mountPath: /usr/share/nginx/html
      name: cache-volume
  volumes:
  - name: cache-volume
    emptyDir:
      medium: Memory
      sizeLimit: 100Mi
[root@server2 volumes]# kubectl apply -f emptydir.yaml 
[root@server2 volumes]# kubectl get pod
[root@server2 volumes]# kubectl describe pod vol1 

[root@server2 volumes]# kubectl get pod -o wide

[root@server2 volumes]# kubectl attach vol1 -c vm1 -it
/ # cd /cache/
/cache # echo www.westos.org > index.html
/cache # curl localhost

Drawbacks of emptyDir

[root@server2 volumes]# kubectl attach vol1 -c vm1 -it
/ # cd cache/
/cache # dd if=/dev/zero of=bigfile bs=1M count=200
[root@server2 volumes]# kubectl get pod

You can see that once the files exceed the sizeLimit, the pod is evicted by the kubelet after a while. The eviction is not immediate because the kubelet only checks usage periodically, so there is a delay.
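A quick way to confirm this (a minimal check; the exact event text depends on the kubelet version):
[root@server2 volumes]# kubectl get pod vol1
[root@server2 volumes]# kubectl describe pod vol1 | grep -i evict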

[root@server2 volumes]# kubectl delete pod vol1 

hostPath volumes

  • In addition to the required path property, you can optionally specify a type for a hostPath volume (the common values are listed in the sketch below).

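The values below are the ones defined by Kubernetes; this fragment is only an illustrative sketch of the field, not part of the walkthrough (the path is hypothetical):
  hostPath:
    path: /some/path
    # type controls how the path is validated or created on the node:
    #   ""                - no check is performed (default)
    #   DirectoryOrCreate - create the directory if it does not exist
    #   Directory         - the directory must already exist
    #   FileOrCreate      - create an empty file if it does not exist
    #   File              - the file must already exist
    #   Socket            - a UNIX socket must exist at the path
    #   CharDevice        - a character device must exist at the path
    #   BlockDevice       - a block device must exist at the path
    type: DirectoryOrCreate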

hostPath example
[root@server2 volumes]# vim hostpath.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: test-pd
spec:
  containers:
  - image: myapp:v1
    name: vm1
    volumeMounts:
    - mountPath: /usr/share/nginx/html
      name: test-volume
  volumes:
  - name: test-volume
    hostPath:
      path: /webdata
      type: DirectoryOrCreate
[root@server2 volumes]# kubectl apply -f hostpath.yaml 
[root@server2 volumes]# kubectl get pod -o wide
[root@server2 volumes]# curl 10.244.22.25

Write content on the server4 node:

[root@server4 ~]# cd /webdata/
[root@server4 webdata]# echo www.westos.org > index.html

Access it again:

[root@server2 volumes]# curl 10.244.22.25

[root@server2 volumes]# kubectl delete pod test-pd 

After the pod is deleted, the content written on server4 is not removed.

NFS

[root@server2 volumes]# vim nfs.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: nfs-pd
spec:
  containers:
  - image: myapp:v1
    name: vm1
    volumeMounts:
    - mountPath: /usr/share/nginx/html
      name: test-volume
  volumes:
  - name: test-volume
    nfs:
      server: 192.168.3.201
      path: /nfsdata
[root@server1 ~]# yum install -y nfs-utils
Install nfs-utils on every node if possible (any node that mounts the NFS volume needs it).
[root@server1 ~]# vim /etc/exports
/nfsdata        *(rw,no_root_squash)
[root@server1 ~]# systemctl enable --now nfs
[root@server1 ~]# showmount -e
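If /etc/exports is changed later, the export list can be refreshed without restarting the service (optional):
[root@server1 ~]# exportfs -rv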

[root@server4 webdata]# yum install -y nfs-utils
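Optionally, verify from the node that the export is reachable before applying the manifest (a quick sanity check, not in the original steps):
[root@server4 ~]# showmount -e 192.168.3.201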
[root@server2 volumes]# kubectl apply -f nfs.yaml 
[root@server2 volumes]# kubectl get pod -o wide
[root@server2 volumes]# curl 10.244.22.26

[root@server1 nfsdata]# rm -rf *
[root@server1 nfsdata]# echo www.westos.org > index.html
[root@server2 volumes]# curl 10.244.22.26

PersistentVolume (PV)

NFS PV example

Clean up the previous environment:

[root@server2 volumes]# kubectl delete -f nfs.yaml

Write the manifests:

[root@server2 volumes]# vim pv1.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv1
spec:
  capacity:
    storage: 5Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Recycle
  storageClassName: nfs
  nfs:
    path: /nfsdata/pv1
    server: 192.168.3.201
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv2
spec:
  capacity:
    storage: 10Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Recycle
  storageClassName: nfs
  nfs:
    path: /nfsdata/pv2
    server: 192.168.3.201
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv3
spec:
  capacity:
    storage: 20Gi
  volumeMode: Filesystem
  accessModes:
    - ReadOnlyMany
  persistentVolumeReclaimPolicy: Recycle
  storageClassName: nfs
  nfs:
    path: /nfsdata/pv3
    server: 192.168.3.201
[root@server2 volumes]# vim pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc1
spec:
  storageClassName: nfs
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc2
spec:
  storageClassName: nfs
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi

---
apiVersion: v1
kind: Pod
metadata:
  name: test-pd
spec:
  containers:
  - image: myapp:v1
    name: nginx
    volumeMounts:
    - mountPath: /usr/share/nginx/html
      name: nfs-pv
  volumes:
  - name: nfs-pv
    persistentVolumeClaim:
      claimName: pvc1

---
apiVersion: v1
kind: Pod
metadata:
  name: test-pd-2
spec:
  containers:
  - image: myapp:v1
    name: nginx
    volumeMounts:
    - mountPath: /usr/share/nginx/html
      name: nfs-pv-2
  volumes:
  - name: nfs-pv-2
    persistentVolumeClaim:
      claimName: pvc2
[root@server1 nfsdata]# rm -rf *
[root@server1 nfsdata]# mkdir pv1 pv2 pv3
[root@server1 nfsdata]# cd pv1
[root@server1 pv1]# echo www.westos.org > index.html
[root@server1 pv1]# cd ../pv2
[root@server1 pv2]# echo www.redhat.com > index.html

[root@server2 volumes]# kubectl apply -f pv1.yaml 
[root@server2 volumes]# kubectl apply -f pvc.yaml 
[root@server2 volumes]# kubectl get  pv
[root@server2 volumes]# kubectl get  pvc

[root@server2 volumes]# kubectl get  pod -o wide

After deleting one PVC:

[root@server2 volumes]# kubectl delete pvc pvc2 
[root@server2 volumes]# kubectl get pv

The corresponding files on the NFS server are also recycled:

[root@server1 pv2]# ll

Clean up the environment:

[root@server2 volumes]# kubectl delete -f pvc.yaml
[root@server2 volumes]# kubectl delete -f pv1.yaml

Likewise, the files on the NFS server are cleaned up.

[root@server1 nfsdata]# rm -rf *

Dynamic volume provisioning

Upload the required image:

[root@server1 ~]# docker load -i nfs-client-provisioner-v4.0.0.tar
[root@server1 harbor]# docker push reg.westos.org/library/nfs-subdir-external-provisioner:v4.0.0

Create the working directory:

[root@server2 ~]# cd volumes/
[root@server2 volumes]# mkdir nfs-client
[root@server2 ~]# cd volumes/nfs-client/

Write the manifests:

The manifests below are adapted from the reference deployment of the nfs-subdir-external-provisioner project.

[root@server2 nfs-client]# vim nfs-client-provisioner.yaml 
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs-client-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: nfs-client-provisioner
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs-client-provisioner
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: nfs-client-provisioner
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs-client-provisioner
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: nfs-subdir-external-provisioner:v4.0.0
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: k8s-sigs.io/nfs-subdir-external-provisioner
            - name: NFS_SERVER
              value: 192.168.3.201
            - name: NFS_PATH
              value: /nfsdata
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.3.201
            path: /nfsdata
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner
parameters:
  archiveOnDelete: "true"
[root@server2 nfs-client]# vim pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
spec:
  storageClassName: managed-nfs-storage
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 2Gi
---
kind: Pod
apiVersion: v1
metadata:
  name: test-pod
spec:
  containers:
  - name: test-pod
    image: myapp:v1
    volumeMounts:
      - name: nfs-pvc
        mountPath: "/usr/share/nginx/html"
  volumes:
    - name: nfs-pvc
      persistentVolumeClaim:
        claimName: test-claim

Create a dedicated namespace:

[root@server2 nfs-client]# kubectl create namespace nfs-client-provisioner
[root@server2 nfs-client]# kubectl get ns

Run it:

[root@server2 nfs-client]# kubectl apply -f nfs-client-provisioner.yaml -n nfs-client-provisioner
[root@server2 nfs-client]# kubectl get sc
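Optionally (not part of the original steps), the StorageClass can be marked as the cluster default so that PVCs which omit storageClassName also use this provisioner:
[root@server2 nfs-client]# kubectl patch storageclass managed-nfs-storage -p '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'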

[root@server2 nfs-client]# kubectl apply -f pvc.yaml 
[root@server2 nfs-client]# kubectl get pvc
[root@server2 nfs-client]# kubectl get pv

You can see that a directory has been created automatically on the NFS server (the provisioner names it <namespace>-<pvcName>-<pvName>); write some content into it to serve:

[root@server1 nfsdata]# ls
[root@server1 nfsdata]# cd default-test-claim-pvc-8443c14a-b355-476c-a819-c3083b4e1177/
[root@server1 default-test-claim-pvc-8443c14a-b355-476c-a819-c3083b4e1177]# echo www.westos.org > index.html

Check the pod IP and access it:

[root@server2 nfs-client]# kubectl get pod -o wide

StatefulSet

Clean up the previous environment.

[root@server2 ~]# cd volumes/
[root@server2 volumes]# mkdir statefulset
[root@server2 volumes]# cd statefulset/
[root@server2 statefulset]# vim service.yaml
apiVersion: v1
kind: Service
metadata:
 name: nginx-svc
 labels:
  app: nginx
spec:
 ports:
 - port: 80
   name: web
 clusterIP: None
 selector:
  app: nginx
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
 name: web
spec:
 serviceName: "nginx-svc"
 replicas: 2
 selector:
  matchLabels:
   app: nginx
 template:
  metadata:
   labels:
    app: nginx
  spec:
   containers:
   - name: nginx
     image: myapp:v1
     ports:
     - containerPort: 80
       name: web
     volumeMounts:
       - name: www
         mountPath: /usr/share/nginx/html
 volumeClaimTemplates:
  - metadata:
     name: www
    spec:
     storageClassName: managed-nfs-storage
     accessModes:
     - ReadWriteOnce
     resources:
      requests:
       storage: 1Gi
[root@server2 statefulset]# kubectl apply -f service.yaml
[root@server2 statefulset]# kubectl get pod
[root@server2 statefulset]# kubectl get pvc
[root@server2 statefulset]# kubectl get pv

[root@server1 harbor]# cd /nfsdata/
[root@server1 nfsdata]# ls
[root@server1 nfsdata]# echo web-0 > default-www-web-0-pvc-c05e881a-4fdb-46d5-8e63-6b9c7cb73ff0/index.html
[root@server1 nfsdata]# echo web-1 > default-www-web-1-pvc-c07040d3-077c-4b3d-90c2-ddf83380d407/index.html

[root@server2 statefulset]# kubectl get pod -o wide

Pod IPs change when pods are recreated, so access the pods from inside the cluster with a test container (see the DNS check below):

[root@server2 statefulset]# kubectl run demo --image=busyboxplus -it
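Inside the demo container, each replica is reachable by its stable DNS name through the headless service (a minimal check; output omitted):
/ # nslookup nginx-svc
/ # curl web-0.nginx-svc
/ # curl web-1.nginx-svc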

Scale down by lowering the replica count in service.yaml:

[root@server2 statefulset]# vim service.yaml

[root@server2 statefulset]# kubectl apply -f service.yaml
[root@server2 statefulset]# kubectl get pod

The pods are terminated one at a time, from the highest ordinal down to the lowest.
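Equivalently (not what this walkthrough does), the replica count can be changed without editing the file:
[root@server2 statefulset]# kubectl scale statefulset web --replicas=0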
Scale back up by restoring the replica count in service.yaml:

[root@server2 statefulset]# vim service.yaml

[root@server2 statefulset]# kubectl apply -f service.yaml
[root@server2 statefulset]# kubectl get pod

The pods start in order, from the lowest ordinal up to the highest.
Accessing them again shows the same content as before:

[root@server2 statefulset]# kubectl attach demo -it

Deploying a MySQL master-slave cluster with StatefulSet

Clean up the previous environment:

[root@server2 statefulset]# kubectl delete -f service.yaml 
[root@server2 statefulset]# kubectl delete pod demo --force 
[root@server2 statefulset]# kubectl delete pvc --all
[root@server2 statefulset]# kubectl delete cm cm1-config my-config my-config-2 my-config-3 nginx-config
[root@server2 statefulset]# mkdir mysql
[root@server2 statefulset]# cd mysql/
[root@server2 mysql]# vim configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: mysql
  labels:
    app: mysql
data:
  master.cnf: |
    # Apply this config only on the master.
    [mysqld]
    log-bin    
  slave.cnf: |
    # Apply this config only on slaves.
    [mysqld]
    super-read-only
[root@server2 mysql]# kubectl apply -f configmap.yaml 
[root@server2 mysql]# kubectl get cm
[root@server2 mysql]# kubectl describe cm mysql

[root@server2 mysql]# vim service.yaml
apiVersion: v1
kind: Service
metadata:
  name: mysql
  labels:
    app: mysql
spec:
  ports:
  - name: mysql
    port: 3306
  clusterIP: None
  selector:
    app: mysql
---
apiVersion: v1
kind: Service
metadata:
  name: mysql-read
  labels:
    app: mysql
spec:
  ports:
  - name: mysql
    port: 3306
  selector:
    app: mysql
[root@server2 mysql]# kubectl apply -f service.yaml 
[root@server2 mysql]# kubectl get svc

[root@server1 nfsdata]# docker pull mysql:5.7
[root@server1 nfsdata]# docker tag mysql:5.7 reg.westos.org/library/mysql:5.7
[root@server1 nfsdata]# docker push reg.westos.org/library/mysql:5.7

The gcr.io/google-samples/xtrabackup:1.0 image cannot be pulled directly here.
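One possible workaround (a sketch only; the tarball name is hypothetical and the image must be obtained separately, e.g. exported on a machine that can reach gcr.io) is to load it and push it to the local registry, just like the mysql image:
[root@server1 nfsdata]# docker load -i xtrabackup-1.0.tar
[root@server1 nfsdata]# docker tag gcr.io/google-samples/xtrabackup:1.0 reg.westos.org/library/xtrabackup:1.0
[root@server1 nfsdata]# docker push reg.westos.org/library/xtrabackup:1.0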

[root@server2 mysql]# vim statefulset.yaml 
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mysql
spec:
  selector:
    matchLabels:
      app: mysql
  serviceName: mysql
  replicas: 3
  template:
    metadata:
      labels:
        app: mysql
    spec:
      initContainers:
      - name: init-mysql
        image: mysql:5.7
        command:
        - bash
        - "-c"
        - |
          set -ex
          # Generate mysql server-id from pod ordinal index.
          [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
          ordinal=${BASH_REMATCH[1]}
          echo [mysqld] > /mnt/conf.d/server-id.cnf
          # Add an offset to avoid reserved server-id=0 value.
          echo server-id=$((100 + $ordinal)) >> /mnt/conf.d/server-id.cnf
          # Copy appropriate conf.d files from config-map to emptyDir.
          if [[ $ordinal -eq 0 ]]; then
            cp /mnt/config-map/master.cnf /mnt/conf.d/
          else
            cp /mnt/config-map/slave.cnf /mnt/conf.d/
          fi          
        volumeMounts:
        - name: conf
          mountPath: /mnt/conf.d
        - name: config-map
          mountPath: /mnt/config-map
      - name: clone-mysql
        image: xtrabackup:1.0
        command:
        - bash
        - "-c"
        - |
          set -ex
          # Skip the clone if data already exists.
          [[ -d /var/lib/mysql/mysql ]] && exit 0
          # Skip the clone on master (ordinal index 0).
          [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
          ordinal=${BASH_REMATCH[1]}
          [[ $ordinal -eq 0 ]] && exit 0
          # Clone data from previous peer.
          ncat --recv-only mysql-$(($ordinal-1)).mysql 3307 | xbstream -x -C /var/lib/mysql
          # Prepare the backup.
          xtrabackup --prepare --target-dir=/var/lib/mysql          
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
      containers:
      - name: mysql
        image: mysql:5.7
        env:
        - name: MYSQL_ALLOW_EMPTY_PASSWORD
          value: "1"
        ports:
        - name: mysql
          containerPort: 3306
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        resources:
          requests:
            cpu: 500m
            memory: 512Mi
        livenessProbe:
          exec:
            command: ["mysqladmin", "ping"]
          initialDelaySeconds: 30
          periodSeconds: 10
          timeoutSeconds: 5
        readinessProbe:
          exec:
            # Check we can execute queries over TCP (skip-networking is off).
            command: ["mysql", "-h", "127.0.0.1", "-e", "SELECT 1"]
          initialDelaySeconds: 5
          periodSeconds: 2
          timeoutSeconds: 1      
      - name: xtrabackup
        image: xtrabackup:1.0
        ports:
        - name: xtrabackup
          containerPort: 3307
        command:
        - bash
        - "-c"
        - |
          set -ex
          cd /var/lib/mysql

          # Determine binlog position of cloned data, if any.
          if [[ -f xtrabackup_slave_info && "x$(<xtrabackup_slave_info)" != "x" ]]; then
            # XtraBackup already generated a partial "CHANGE MASTER TO" query
            # because we're cloning from an existing slave. (Need to remove the tailing semicolon!)
            cat xtrabackup_slave_info | sed -E 's/;$//g' > change_master_to.sql.in
            # Ignore xtrabackup_binlog_info in this case (it's useless).
            rm -f xtrabackup_slave_info xtrabackup_binlog_info
          elif [[ -f xtrabackup_binlog_info ]]; then
            # We're cloning directly from master. Parse binlog position.
            [[ `cat xtrabackup_binlog_info` =~ ^(.*?)[[:space:]]+(.*?)$ ]] || exit 1
            rm -f xtrabackup_binlog_info xtrabackup_slave_info
            echo "CHANGE MASTER TO MASTER_LOG_FILE='${BASH_REMATCH[1]}',\
                  MASTER_LOG_POS=${BASH_REMATCH[2]}" > change_master_to.sql.in
          fi

          # Check if we need to complete a clone by starting replication.
          if [[ -f change_master_to.sql.in ]]; then
            echo "Waiting for mysqld to be ready (accepting connections)"
            until mysql -h 127.0.0.1 -e "SELECT 1"; do sleep 1; done

            echo "Initializing replication from clone position"
            mysql -h 127.0.0.1 \
                  -e "$(<change_master_to.sql.in), \
                          MASTER_HOST='mysql-0.mysql', \
                          MASTER_USER='root', \
                          MASTER_PASSWORD='', \
                          MASTER_CONNECT_RETRY=10; \
                        START SLAVE;" || exit 1
            # In case of container restart, attempt this at-most-once.
            mv change_master_to.sql.in change_master_to.sql.orig
          fi

          # Start a server to send backups when requested by peers.
          exec ncat --listen --keep-open --send-only --max-conns=1 3307 -c \
            "xtrabackup --backup --slave-info --stream=xbstream --host=127.0.0.1 --user=root"          
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
      volumes:
      - name: conf
        emptyDir: {}
      - name: config-map
        configMap:
          name: mysql
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 5Gi
[root@server2 mysql]# kubectl apply -f statefulset.yaml 
[root@server2 mysql]# yum install -y mariadb
[root@server2 mysql]# kubectl get svc
[root@server2 mysql]# kubectl get pod -o wide
[root@server2 mysql]# mysql -h <pod IP>
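To verify that replication works, one option (a sketch, not in the original steps; it uses temporary client pods so the headless-service DNS names resolve) is to write through the master and read through mysql-read:
[root@server2 mysql]# kubectl run mysql-client --image=mysql:5.7 -i --rm --restart=Never -- \
  mysql -h mysql-0.mysql -e "CREATE DATABASE IF NOT EXISTS test; CREATE TABLE IF NOT EXISTS test.messages (message VARCHAR(250)); INSERT INTO test.messages VALUES ('hello');"
[root@server2 mysql]# kubectl run mysql-client-read --image=mysql:5.7 -i --rm --restart=Never -- \
  mysql -h mysql-read -e "SELECT * FROM test.messages"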

Directories for each replica are generated automatically:

[root@server1 ~]# cd /nfsdata/
[root@server1 nfsdata]# ls