Installing a StorageClass in Kubernetes (NFS)
Set up the NFS server
# Install the NFS utilities
yum install nfs-utils -y
# Create the shared directory
mkdir -p /data/harbor
# Relax permissions on the share
chmod -R 777 /data/harbor
# Edit the exports file
vim /etc/exports
# The first field is the shared directory; * means any client IP may mount it.
# fsid/anonuid/anongid grant the worker nodes write access; 0 maps anonymous users to root.
/data/harbor *(rw,sync,fsid=0,anonuid=0,anongid=0)
# Reload the export table
exportfs -r
# Verify the export took effect
exportfs
# Start and enable the rpcbind and nfs services
systemctl restart rpcbind && systemctl enable rpcbind
systemctl restart nfs && systemctl enable nfs
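Before configuring the clients, it is worth confirming that the export is actually being served; two standard checks on the server itself:
# rpcbind and the NFS services should both be registered
rpcinfo -p localhost
# The share should appear in the server's own export list
showmount -e localhost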
Set up the NFS client (run on each Kubernetes node)
yum install nfs-utils rpcbind -y
# Start rpcbind
systemctl restart rpcbind && systemctl enable rpcbind
# Check the directories exported by the server
[root@k8s-node2 ~]# showmount -e 172.19.89.84
Export list for 172.19.89.84:
/data/harbor *
# Create the mount point
mkdir -p /data/harbor
# Mount the share
mount -t nfs 172.19.89.84:/data/harbor /data/harbor
# To mount the share automatically at boot, append it to /etc/fstab:
echo "172.19.89.84:/data/harbor /data/harbor nfs defaults 0 0" >> /etc/fstab
Next, deploy nfs-client-provisioner.
On a machine with internet access, pull the Docker image and package it:
# Pull and save the image to a tarball
docker pull jmgao1983/nfs-client-provisioner:latest
docker save -o nfs-client-provisioner jmgao1983/nfs-client-provisioner:latest
# Copy the tarball to the server, then load it
docker load -i nfs-client-provisioner
# Retag the image so its name matches the one used in the manifest below
docker tag jmgao1983/nfs-client-provisioner:latest nfs-client-provisioner:latest
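Because the Deployment below uses imagePullPolicy: Never, the image has to exist locally on whichever node the pod is scheduled to. A minimal sketch of distributing the tarball to the workers (the node names are placeholders for your own):
# Copy the image tarball to each worker node and load/retag it there
for node in k8s-node1 k8s-node2; do
  scp nfs-client-provisioner root@$node:/tmp/
  ssh root@$node "docker load -i /tmp/nfs-client-provisioner && docker tag jmgao1983/nfs-client-provisioner:latest nfs-client-provisioner:latest"
done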
The manifests below cover RBAC authorization and the provisioner Deployment that points at the NFS server, keeping the client side in sync with the server. Creating a dedicated namespace is optional; this walkthrough keeps everything in the default namespace.
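If you do want a separate namespace, create it first and change namespace: default in the ClusterRoleBinding below to match (the name here is only an example):
kubectl create namespace nfs-provisioner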
On the master node, create rbac.yaml:
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["services", "endpoints"]
    verbs: ["get", "create", "list", "watch", "update"]
  - apiGroups: ["extensions"]
    resources: ["podsecuritypolicies"]
    resourceNames: ["nfs-provisioner"]
    verbs: ["use"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-provisioner
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
# vi nfs-deployment.yaml — the Deployment for the nfs-client provisioner
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-client-provisioner
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-provisioner
      containers:
        - name: nfs-client-provisioner
          image: nfs-client-provisioner:latest   # image name and tag
          imagePullPolicy: Never                 # use the locally loaded image instead of pulling from a registry
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME             # name of the provisioner
              value: storage.pri/nfs             # arbitrary, but later references must use the same value
            - name: NFS_SERVER
              value: 172.19.89.84                # NFS server (master node) IP
            - name: NFS_PATH
              value: /data/harbor                # exported directory
      volumes:
        - name: nfs-client-root
          nfs:
            server: 172.19.89.84                 # NFS server (master node) IP
            path: /data/harbor                   # exported directory
Create the storageclass-nfs.yaml file:
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: harbor
provisioner: storage.pri/nfs      # must match PROVISIONER_NAME in the Deployment
reclaimPolicy: Delete
allowVolumeExpansion: true        # allow PVCs to be expanded after creation
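With reclaimPolicy: Delete, the directory the provisioner creates on the share is cleaned up when its PVC is deleted. The nfs-client provisioner also understands an archiveOnDelete StorageClass parameter that controls whether deleted volumes are archived instead; a sketch of the optional addition, assuming the image in use supports it:
# Optional extra field in storageclass-nfs.yaml
parameters:
  archiveOnDelete: "true"         # keep the data directory (renamed) instead of deleting it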
Apply the YAML files:
[root@k8s-master nfs]# kubectl apply -f storageclass-nfs.yaml
storageclass.storage.k8s.io/harbor created
[root@k8s-master nfs]# kubectl get storageclass
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
harbor storage.pri/nfs Delete Immediate true 21s
[root@k8s-master nfs]# kubectl apply -f rbac.yaml
serviceaccount/nfs-provisioner created
clusterrole.rbac.authorization.k8s.io/nfs-provisioner-runner created
clusterrolebinding.rbac.authorization.k8s.io/run-nfs-provisioner created
deployment.apps/nfs-client-provisioner created
[root@k8s-master nfs]# kubectl get pods
NAME READY STATUS RESTARTS AGE
nfs-client-provisioner-f475fc549-7292r 1/1 Running 0 55s
[root@k8s-master nfs]#
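Optionally, mark harbor as the cluster's default StorageClass so that PVCs which omit a class still use it:
kubectl patch storageclass harbor -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'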
Create a test PVC (test-claim.yaml):
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
  annotations:
    volume.beta.kubernetes.io/storage-class: "harbor"   # must match metadata.name in storageclass-nfs.yaml
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
If the PVC reaches the Bound state, dynamic provisioning works:
[root@k8s-master nfs]# kubectl apply -f test-claim.yaml
persistentvolumeclaim/test-claim created
[root@k8s-master nfs]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
test-claim Bound pvc-89a2f647-29af-4d72-9e2d-b2503e3dfc6c 1Mi RWX harbor 14s
[root@k8s-master nfs]#
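To confirm that provisioned data really lands on the NFS share, a throwaway Pod can mount the claim and write a marker file (a sketch based on the provisioner's usual test pod; the pod and file names are arbitrary):
# test-pod.yaml — writes a file into the PVC and exits
kind: Pod
apiVersion: v1
metadata:
  name: test-pod
spec:
  restartPolicy: Never
  containers:
    - name: test-pod
      image: busybox
      command: ["/bin/sh", "-c", "touch /mnt/SUCCESS && exit 0 || exit 1"]
      volumeMounts:
        - name: nfs-pvc
          mountPath: /mnt
  volumes:
    - name: nfs-pvc
      persistentVolumeClaim:
        claimName: test-claim
After applying it with kubectl apply -f test-pod.yaml, the directory the provisioner created for test-claim should appear under /data/harbor on the NFS server (check with ls /data/harbor) and contain the SUCCESS file.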