
I. Setting Up the NFS Server

1. Install

yum -y install nfs-utils rpcbind

2. Configure

vi /etc/exports

Add the following line:

/nfsdata *(rw,sync,no_root_squash)

Create the shared directory:

mkdir /nfsdata
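For reference, the export options above mean the following, and `exportfs` can re-read the configuration without a service restart (a quick sketch using standard nfs-utils commands):

```bash
# Meaning of the export options in /etc/exports:
#   *               - allow any client host to mount the share
#   rw              - read-write access
#   sync            - commit writes to disk before replying to the client
#   no_root_squash  - do not map the client's root user to nobody
#
# If /etc/exports is edited later while nfs-server is already running,
# re-read it without restarting the service and list the active exports:
exportfs -r
exportfs -v
```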

3. Start the services

Start and enable the services:

systemctl start rpcbind
systemctl enable rpcbind

systemctl start nfs-server
systemctl enable nfs-server

4. Verify the installation

Check that the shared directory is being exported:

showmount -e
[root@master01 /]# showmount -e
Export list for master01:
/nfsdata *

II. NFS Client Configuration

1. Install

yum install -y nfs-utils rpcbind

2. Create the mount directory

mkdir -p /nfs/data

3. Mount

mount -t nfs 192.168.1.28:/nfsdata /nfs/data

Explanation:
mount: the mount command
-t: specifies the filesystem type
nfs: the filesystem type (NFS protocol)
192.168.1.28: the NFS server's IP address
/nfsdata: the shared directory on the NFS server
/nfs/data: the mount point on the client
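A manual `mount` does not survive a reboot. To remount the share automatically at boot, one common approach is an `/etc/fstab` entry along these lines (a sketch using the same server address and paths as above; `_netdev` delays the mount until the network is up):

```bash
# Append an fstab entry so the NFS share is mounted at boot
echo '192.168.1.28:/nfsdata  /nfs/data  nfs  defaults,_netdev  0  0' >> /etc/fstab
# Mount everything listed in fstab now and verify
mount -a
df -Th | grep /nfs/data
```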

4. View mount information

df -Th

5. Test

Create a test.txt file under /nfs/data on the client; the same file should also be visible under /nfsdata on the server.
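For example:

```bash
# On the client: create a file through the NFS mount
touch /nfs/data/test.txt

# On the NFS server: the same file should appear in the exported directory
ls /nfsdata/
```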

6. Unmount

umount /nfs/data

III. Configuring a StorageClass in Kubernetes

The configuration below uses the open-source nfs-client-provisioner plugin.

1. Download

The repository originally lived at https://github.com/kubernetes-incubator/external-storage.git and has since been archived under kubernetes-retired, so clone it from there:

git clone https://github.com/kubernetes-retired/external-storage.git

2. Authorization (RBAC)

cd external-storage/nfs-client/deploy
vi rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
Deploy:

kubectl apply -f rbac.yaml

Check:

[root@master01 deploy]# kubectl get sa
NAME                     SECRETS   AGE
nfs-client-provisioner   1         179m
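Besides the ServiceAccount, rbac.yaml also creates a ClusterRole, ClusterRoleBinding, Role, and RoleBinding; these can be verified with standard `kubectl get` commands, for example:

```bash
# Cluster-scoped objects
kubectl get clusterrole nfs-client-provisioner-runner
kubectl get clusterrolebinding run-nfs-client-provisioner
# Namespaced objects (leader-election lock)
kubectl get role,rolebinding -n default | grep leader-locking-nfs-client-provisioner
```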

3. Deploy the provisioner Deployment

vi deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: quay.io/external_storage/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs
            - name: NFS_SERVER
              value: 192.168.1.28
            - name: NFS_PATH
              value: /nfsdata
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.1.28
            path: /nfsdata

Deploy:

kubectl apply -f deployment.yaml

Check that the provisioner Pod is up and running:

kubectl get pods
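If the Pod is running but later PVCs stay Pending, the provisioner's log is the first place to look; it should show the provisioner registering under the PROVISIONER_NAME value and reaching the NFS server. For example:

```bash
# Tail the provisioner log to confirm it started cleanly
kubectl logs deploy/nfs-client-provisioner -n default --tail=50
```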

4. Deploy the StorageClass

vi class.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
provisioner: fuseim.pri/ifs # or choose another name, must match deployment's env PROVISIONER_NAME
parameters:
  archiveOnDelete: "false"

Deploy:

kubectl apply -f class.yaml

Check:

[root@master01 deploy]# kubectl get sc
NAME                            PROVISIONER      RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
managed-nfs-storage (default)   fuseim.pri/ifs   Delete          Immediate           false                  6h48m
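Note that class.yaml above does not mark the class as default by itself; the `(default)` flag in this output comes from the `storageclass.kubernetes.io/is-default-class` annotation, which can be set afterwards if desired:

```bash
# Optional: mark managed-nfs-storage as the cluster's default StorageClass
kubectl patch storageclass managed-nfs-storage \
  -p '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
```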

5. Test with a PVC

vi test-claim.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
  annotations:
    volume.beta.kubernetes.io/storage-class: "managed-nfs-storage"
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi

Deploy:

kubectl apply -f test-claim.yaml

Check:

kubectl get pvc
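If dynamic provisioning works, the PVC should reach the Bound state and a matching PV should have been created automatically:

```bash
# The PVC should be Bound, backed by a dynamically provisioned PV
kubectl get pvc test-claim
kubectl get pv
```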

6. Test with a Pod

vi test-pod.yaml
kind: Pod
apiVersion: v1
metadata:
  name: test-pod
spec:
  containers:
  - name: test-pod
    image: gcr.io/google_containers/busybox:1.24
    command:
      - "/bin/sh"
    args:
      - "-c"
      - "touch /mnt/SUCCESS && exit 0 || exit 1"
    volumeMounts:
      - name: nfs-pvc
        mountPath: "/mnt"
  restartPolicy: "Never"
  volumes:
    - name: nfs-pvc
      persistentVolumeClaim:
        claimName: test-claim

Deploy:

kubectl apply -f test-pod.yaml

Check:

kubectl get pods 
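Once the Pod completes, the provisioner should have created a per-PVC subdirectory on the NFS share (named roughly <namespace>-<pvc-name>-<pv-name>) containing the SUCCESS file the Pod touched. On the NFS server, for example:

```bash
# On the NFS server: list the directory the provisioner created for the PVC
ls /nfsdata/
ls /nfsdata/default-test-claim-pvc-*/
```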

IV. Common Errors

1. provision "default/test-claim" class "managed-nfs-storage": unexpected error getting claim reference: selfLink was empty, can't make reference

2. waiting for a volume to be created, either by external provisioner "fuseim.pri/ifs" or manually created by system administrator

For solutions, see [Kubernetes Series] Kubernetes Common Errors.
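For reference, the first error is the well-known selfLink issue this older provisioner image hits on Kubernetes 1.20+. The commonly cited workarounds are switching to the newer nfs-subdir-external-provisioner image, or (on 1.20/1.21 kubeadm clusters only, where the feature gate still exists) re-enabling selfLink on the API server, roughly like this:

```bash
# Sketch for a kubeadm cluster: edit the kube-apiserver static Pod manifest
vi /etc/kubernetes/manifests/kube-apiserver.yaml
# add the following flag under the kube-apiserver command:
#   - --feature-gates=RemoveSelfLink=false
# kubelet restarts the static Pod automatically once the file is saved
```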
