Table of Contents

Deploying NFS

#Install the NFS and RPC packages with yum

#Create the shared directory

#Grant the nfsnobody user ownership of the directory

#Configure the exported directory

#Start rpcbind and nfs

#Self-test

Rancher configuration (shared storage)

#Create a PV

#Create a PVC

#Use the PVC

KubeSphere cluster configuration

#Install the NFS client on every node

#Install the NFS dynamic provisioner in the cluster

#Use


Deploying NFS

#Install the NFS and RPC packages with yum

yum -y install nfs-utils rpcbind

#Create the shared directory

mkdir -p /mnt/nfs/rancher/data

#Grant the nfsnobody user ownership of the directory

chown -R nfsnobody:nfsnobody /mnt/nfs/rancher/data

#Configure the exported directory

vim /etc/exports

/mnt/nfs/rancher/data 192.168.1.0/24(rw,no_root_squash,no_subtree_check,sync)

---  no_root_squash: do not map the root user to the anonymous user (nfsnobody)
---  no_subtree_check: skip subtree permission checks against the parent directory
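
If you later change /etc/exports while the server is already running, the export table can be reloaded without a restart:

exportfs -ra    # re-export everything listed in /etc/exports

exportfs -v     # verify the active exports and their options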

#Start rpcbind and nfs (rpcbind first, since the NFS services register with it)

systemctl start rpcbind && systemctl enable rpcbind

systemctl start nfs && systemctl enable nfs

#Self-test

showmount -e 127.0.0.1
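
If the export is working, the output should look roughly like this (the path and network come from the setup above):

Export list for 127.0.0.1:
/mnt/nfs/rancher/data 192.168.1.0/24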

Rancher configuration (shared storage)

#Create a PV

Open Rancher and go to the cluster view.

Select Storage -> Persistent Volumes -> Create PV.
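
If you prefer a manifest over the UI form, a minimal sketch of the equivalent NFS-backed PV follows; the name and capacity are assumptions, while the server and path come from the NFS setup above:

apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv                  # hypothetical name
spec:
  capacity:
    storage: 100Gi              # assumed capacity
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 192.168.1.10
    path: /mnt/nfs/rancher/data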

#Create a PVC

Go into the project, select PVC, and create a PVC.
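
The equivalent manifest, as a sketch (the claim name and requested size are assumptions; volumeName pins the claim to the PV above, and the empty storageClassName disables dynamic provisioning for a static bind):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-pvc                 # hypothetical name
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 100Gi            # assumed size
  volumeName: nfs-pv            # the PV sketched above
  storageClassName: ""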

#Use the PVC

In the workload that needs the storage, mount the PVC as a data volume.
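
In a pod spec this corresponds to a persistentVolumeClaim volume plus a volumeMount, roughly as follows (the container image and mount path are assumptions):

spec:
  containers:
    - name: app
      image: nginx              # assumed image
      volumeMounts:
        - name: data
          mountPath: /data      # assumed mount path
  volumes:
    - name: data
      persistentVolumeClaim:
        claimName: nfs-pvc      # the claim sketched above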

KubeSphere cluster configuration

#Install the NFS client on every node

The NFS client tools must be present on every node; otherwise pods scheduled there will fail to mount the volumes later.

yum install nfs-utils
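
A quick way to confirm a node can reach the share before deploying anything (run on the node; /mnt/nfstest is an arbitrary temporary mount point):

showmount -e 192.168.1.10

mkdir -p /mnt/nfstest && mount -t nfs 192.168.1.10:/mnt/nfs/rancher/data /mnt/nfstest && umount /mnt/nfstest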

#Install the NFS dynamic provisioner in the cluster

The upstream nfs-client-provisioner: first the ServiceAccount and RBAC resources.

#ServiceAccount and RBAC

#Edit rbac.yaml (only the namespace needs to change; everything else can be used as-is)

vim rbac.yaml

apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  namespace: service
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: service
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  namespace: service
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  namespace: service
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: service
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io

#Apply it with kubectl

kubectl apply -f ./rbac.yaml

#Expected output

serviceaccount/nfs-client-provisioner created
clusterrole.rbac.authorization.k8s.io/nfs-client-provisioner-runner created
clusterrolebinding.rbac.authorization.k8s.io/run-nfs-client-provisioner created
role.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner created
rolebinding.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner created
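
You can double-check that the ServiceAccount landed in the right namespace:

kubectl -n service get serviceaccount nfs-client-provisioner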

#The Deployment for the upstream nfs-client-provisioner

#Write deployment.yaml (the main things to change are the NFS server IP, the shared directory, and the namespace; everything else can be used as-is)

vim deployment.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  namespace: service
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: quay.io/external_storage/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: nfs/provisioner-229
            - name: NFS_SERVER
              value: 192.168.1.10
            - name: NFS_PATH
              value: /mnt/nfs/rancher/data
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.1.10
            path: /mnt/nfs/rancher/data

#Before starting, on Kubernetes v1.20 or later you must add the flag - --feature-gates=RemoveSelfLink=false to /etc/kubernetes/manifests/kube-apiserver.yaml (SelfLink still exists before v1.20; if creating and mounting NFS volumes fails later, add this flag). Note that the feature gate itself was removed in v1.24, so this workaround only applies up to v1.23.
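
The flag goes into the kube-apiserver command list in the static pod manifest; the kubelet restarts the apiserver automatically once the file is saved. Abridged, with only the relevant lines shown:

spec:
  containers:
    - command:
        - kube-apiserver
        - --feature-gates=RemoveSelfLink=false   # add this line; leave the existing flags untouched
        ...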

#Apply it with kubectl

kubectl apply -f deployment.yaml

#Expected output

deployment.apps/nfs-client-provisioner created

#UI alternative: you can instead paste the YAML directly into the console.
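
Check that the provisioner pod is running before moving on:

kubectl -n service get pods -l app=nfs-client-provisioner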

#Create the StorageClass

# vim storageclass.yaml. Make sure the provisioner value matches the PROVISIONER_NAME env set in the Deployment above; if you did not change it there, leave this file unchanged.

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
  annotations:
    "storageclass.kubernetes.io/is-default-class": "false"
provisioner: nfs/provisioner-229 # must match the Deployment's PROVISIONER_NAME env value
parameters:
  archiveOnDelete: "false"

#UI alternative: paste the YAML in directly.
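
Verify that the class is registered:

kubectl get storageclass managed-nfs-storage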

#Create and mount an NFS volume

#Write the following YAML file

kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: nfsce
  namespace: service
  annotations:
    kubesphere.io/creator: admin
    volume.beta.kubernetes.io/storage-provisioner: nfs/provisioner-229
  finalizers:
    - kubernetes.io/pvc-protection
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 100Gi  # requested capacity for the volume
  storageClassName: managed-nfs-storage
  volumeMode: Filesystem

#UI alternative: paste the YAML in directly.
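
Once applied, the provisioner should create a backing PV and the claim should report Bound:

kubectl -n service get pvc nfsce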

#Use
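
The original walkthrough mounts the claim through the KubeSphere console; as a minimal CLI sketch, a Deployment consuming the nfsce claim could look like this (the workload name, image, and mount path are assumptions):

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-demo                # hypothetical workload
  namespace: service
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nfs-demo
  template:
    metadata:
      labels:
        app: nfs-demo
    spec:
      containers:
        - name: app
          image: nginx          # assumed image
          volumeMounts:
            - name: data
              mountPath: /data  # assumed mount path
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: nfsce    # the PVC created above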
