K8S --- NFS Dynamic Storage Provisioning and Usage
Create an NFS dynamic storage provisioner in Kubernetes and consume it from a PVC.
Official repository:
https://github.com/kubernetes-sigs/nfs-ganesha-server-and-external-provisioner
Install the NFS utilities on the storage node (they are also needed on any node that will mount the provisioned volumes):
yum -y install nfs-utils rpcbind
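Optionally, confirm that the RPC services NFS relies on are available before deploying anything. This is only a sanity check and assumes a systemd-based host:
# start rpcbind and list the registered RPC programs
systemctl enable --now rpcbind
rpcinfo -p localhost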
Create the deployment (deployment.yaml contains a ServiceAccount, a Service, and the provisioner Deployment):
kubectl create -f deployment.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-provisioner
---
kind: Service
apiVersion: v1
metadata:
  name: nfs-provisioner
  labels:
    app: nfs-provisioner
spec:
  ports:
    - name: nfs
      port: 2049
    - name: nfs-udp
      port: 2049
      protocol: UDP
    - name: nlockmgr
      port: 32803
    - name: nlockmgr-udp
      port: 32803
      protocol: UDP
    - name: mountd
      port: 20048
    - name: mountd-udp
      port: 20048
      protocol: UDP
    - name: rquotad
      port: 875
    - name: rquotad-udp
      port: 875
      protocol: UDP
    - name: rpcbind
      port: 111
    - name: rpcbind-udp
      port: 111
      protocol: UDP
    - name: statd
      port: 662
    - name: statd-udp
      port: 662
      protocol: UDP
  selector:
    app: nfs-provisioner
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-provisioner
spec:
  selector:
    matchLabels:
      app: nfs-provisioner
  replicas: 2              # two replicas
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-provisioner
    spec:
      serviceAccount: nfs-provisioner
      containers:
        - name: nfs-provisioner
          image: k8s.gcr.io/sig-storage/nfs-provisioner:v3.0.0
          ports:
            - name: nfs
              containerPort: 2049
            - name: nfs-udp
              containerPort: 2049
              protocol: UDP
            - name: nlockmgr
              containerPort: 32803
            - name: nlockmgr-udp
              containerPort: 32803
              protocol: UDP
            - name: mountd
              containerPort: 20048
            - name: mountd-udp
              containerPort: 20048
              protocol: UDP
            - name: rquotad
              containerPort: 875
            - name: rquotad-udp
              containerPort: 875
              protocol: UDP
            - name: rpcbind
              containerPort: 111
            - name: rpcbind-udp
              containerPort: 111
              protocol: UDP
            - name: statd
              containerPort: 662
            - name: statd-udp
              containerPort: 662
              protocol: UDP
          securityContext:
            capabilities:
              add:
                - DAC_READ_SEARCH
                - SYS_RESOURCE
          args:
            - "-provisioner=example.com/nfs"  # must match the provisioner field in the StorageClass; this is what ties them together
          env:
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: SERVICE_NAME
              value: nfs-provisioner
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: export-volume
              mountPath: /export            # the /export directory inside the image
      volumes:
        - name: export-volume
          hostPath:
            path: /srv                      # the host's /srv directory, where the exported data is stored
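After applying deployment.yaml, confirm that the provisioner pods are running and that the nfs-provisioner Service was created, and look at the provisioner logs for errors; for example:
kubectl get pods -l app=nfs-provisioner -o wide
kubectl get svc nfs-provisioner
kubectl logs deployment/nfs-provisioner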
RBAC authorization
kubectl create -f rbac.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["services", "endpoints"]
    verbs: ["get"]
  - apiGroups: ["extensions"]
    resources: ["podsecuritypolicies"]
    resourceNames: ["nfs-provisioner"]
    verbs: ["use"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-provisioner
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-provisioner
  apiGroup: rbac.authorization.k8s.io
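To confirm the bindings took effect, you can impersonate the service account and ask the API server whether the key permissions were granted (a quick check, assuming the provisioner is deployed in the default namespace):
kubectl auth can-i create persistentvolumes --as=system:serviceaccount:default:nfs-provisioner
kubectl auth can-i update endpoints -n default --as=system:serviceaccount:default:nfs-provisioner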
Create the StorageClass
kubectl create -f class.yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: example-nfs
provisioner: example.com/nfs   # must match the -provisioner argument in deployment.yaml
mountOptions:
  - vers=4.1
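Verify the class, and optionally mark it as the cluster default so that PVCs without an explicit storageClassName also use it (the patch is optional):
kubectl get storageclass example-nfs
# optional: make example-nfs the default StorageClass
kubectl patch storageclass example-nfs -p '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'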
Create the PVC
kubectl create -f claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs
spec:
  storageClassName: example-nfs
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
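The provisioner should create a matching PV within a few seconds; watch the claim until it reports Bound, for example:
kubectl get pvc nfs -w        # wait for STATUS to become Bound
kubectl describe pvc nfs      # the events show which provisioner handled the claim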
Use the PVC
kubectl create -f write-pod.yaml
kind: Pod
apiVersion: v1
metadata:
  name: write-pod
spec:
  containers:
    - name: write-pod
      image: busybox:latest
      imagePullPolicy: "IfNotPresent"
      command:
        - "/bin/sh"
      args:
        - "-c"
        - "touch /mnt/SUCCESS && exit 0 || exit 1"   # create a SUCCESS file on the PVC
      volumeMounts:
        - name: nfs-pvc
          mountPath: "/mnt"   # mount the PVC at /mnt inside the container
  restartPolicy: "Never"
  volumes:
    - name: nfs-pvc
      persistentVolumeClaim:
        claimName: nfs   # name of the PVC created above
Verify that the file was created
# the NFS data is stored on the k8s-node1 node
[root@k8s-master1 nfs-ganesha-server]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nfs-provisioner-7bd7fffb49-rbtb9 1/1 Running 0 3h38m 10.244.3.96 k8s-node1 <none> <none>
write-pod 0/1 Completed 0 112s 10.244.3.98 k8s-node1 <none> <none>
# check the PV and PVC objects
[root@k8s-master1 nfs-ganesha-server]# kubectl get pv,pvc
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
persistentvolume/pvc-e76fc58a-2b2b-42e1-b06f-467dbdc16ff2 1Mi RWX Delete Bound default/nfs example-nfs 3h39m
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
persistentvolumeclaim/nfs Bound pvc-e76fc58a-2b2b-42e1-b06f-467dbdc16ff2 1Mi RWX example-nfs 3h39m
# on k8s-node1, check whether the SUCCESS file was created
[root@k8s-node1 srv]# ll /srv/pvc-e76fc58a-2b2b-42e1-b06f-467dbdc16ff2
total 0
-rw-r--r-- 1 root root 0 Jun  8 14:59 SUCCESS
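When the test is finished, the resources can be removed with the same manifests; since the dynamically created PV uses the Delete reclaim policy (see the output above), deleting the PVC should also remove that PV (cleanup sketch using the file names from this walkthrough):
kubectl delete -f write-pod.yaml
kubectl delete -f claim.yaml      # the bound PV is removed automatically (reclaim policy: Delete)
kubectl delete -f class.yaml -f rbac.yaml -f deployment.yaml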