k8s-cephfs-test
[root@master ~]# kubectl version
Client Version: version.Info{Major:"1", Minor:"20", GitVersion:"v1.20.4", GitCommit:"e87da0bd6e03ec3fea7933c4b5263d151aafd07c", GitTreeState:"clean", BuildDate:"2021-02-18T16:12:00Z", GoVersion:"go1.15.8", Compiler:"gc", Platform:"linux/amd64"}
Server Version: version.Info{Major:"1", Minor:"20", GitVersion:"v1.20.4", GitCommit:"e87da0bd6e03ec3fea7933c4b5263d151aafd07c", GitTreeState:"clean", BuildDate:"2021-02-18T16:03:00Z", GoVersion:"go1.15.8", Compiler:"gc", Platform:"linux/amd64"}
[root@master ~]# ceph -v
ceph version 13.2.10 (564bdc4ae87418a232fc901524470e1a0f76d641) mimic (stable)
##########################################
References:
https://blog.51cto.com/leejia/2583381
https://www.cnblogs.com/nineep/p/8989795.html
##########################################
#Uninstall and reinstall (test)
#ceph-deploy purge master node001
#ceph-deploy purgedata master node001
#ceph-deploy forgetkeys
yum install ceph ceph-radosgw rdate -y
yum -y install ceph-deploy
pip install ceph_deploy
mkdir /var/lib/disk3/ceph && cd /var/lib/disk3/ceph
[root@master ceph]# ceph-deploy new master
[root@master ceph]# ceph-deploy install master node001
[root@master ceph]# ceph-deploy mon create master node001
[root@master ceph]# ceph-deploy --overwrite-conf mon create-initial
#Distribute the keyring files to each node (on each OSD node, collect that node's keyring files: ceph-deploy gatherkeys master)
[root@master ceph]# ceph-deploy admin node001
#ceph-deploy mon add node002
ceph-deploy admin master
[root@master ceph]# ceph-deploy mgr create master node001
[root@master ceph]# ceph-deploy mds create master node001
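#Sanity check at this point (a sketch; exact output varies): both mons should be in quorum, with the mgr and mds daemons registered
ceph -s
ceph mon stat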
#[run on master] Wipe the disks clean
#dmsetup remove ceph--6de3xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#mkfs.xfs -f /dev/sdc
ceph-deploy disk zap master /dev/sdc
ceph-deploy disk zap node001 /dev/sdc
#[run on master] Create the OSD nodes
ceph-deploy osd create --data /dev/sdc master
ceph-deploy osd create --data /dev/sdc node001
ceph health
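#To confirm both OSDs came up and joined the CRUSH map (with the two-OSD layout above, expect 2 osds, up/in):
ceph osd tree
ceph osd stat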
#ceph osd pool delete cephfs_data cephfs_data --yes-i-really-really-mean-it
ceph osd pool create cephfs_data 100
ceph osd pool create cephfs_metadata 100
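#Why 100 PGs: the usual rule of thumb is (num_osds * 100) / pool_size rounded to a power of two;
#with the 2 OSDs and osd_pool_default_size = 2 used here, (2 * 100) / 2 = 100 (128 would also be reasonable)
ceph osd pool get cephfs_data pg_num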
#Enable the filesystem with the fs new command
ceph fs new cephfs cephfs_metadata cephfs_data
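#Verify the filesystem exists and an mds went active (a sketch; output varies):
ceph fs ls
ceph mds stat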
ceph auth get-or-create client.cephfs mon "allow r" mds "allow rw" osd "allow rw pool=cephfs_data, allow rw pool=cephfs_metadata"
ceph auth get client.cephfs
echo "xxxxxxxxxxxxxxxxxxxxxxxx==" >> /tmp/lee.secret
[root@master helm2]# mount -t ceph 192.168.56.105:6789:/ /mnt/mycephfs -o name=cephfs,secretfile=/tmp/lee.secret
[root@master helm2]# df -h|grep ceph
192.168.56.105:6789:/ 9.9G 0 9.9G 0% /mnt/mycephfs
[root@master helm2]#
==================k8s======================
[root@master ceph]# ceph auth get-key client.cephfs | base64
QVFEVUMwSmdER1pqSlJBQWw0bGt2VTh1THR1NzhPMzJTWE9YMXc9PQ==
[root@master ceph-step]# ceph auth get-key client.admin | base64
QVFCckxFRmdzQWx6RHhBQVRzRlJ5MVZzRFJrdUNCUzRNVTJhd1E9PQ==
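#The StorageClass below references a Secret named ceph-secret-admin in kube-system; one way to create it from the admin key (a sketch):
kubectl create secret generic ceph-secret-admin -n kube-system \
  --from-literal=key="$(ceph auth get-key client.admin)"
#(kubectl base64-encodes the value itself; the base64 strings above are only needed when writing a Secret manifest by hand)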
Kubernetes 1.20 disables selfLink, which the external cephfs provisioner still depends on.
Edit /etc/kubernetes/manifests/kube-apiserver.yaml and add to the command arguments:
- --feature-gates=RemoveSelfLink=false
(kube-apiserver runs as a static pod, so the kubelet restarts it automatically once the manifest is saved; no kubectl apply is needed.)
[root@master ceph]# cat ceph.conf
[global]
fsid = 08be718c-dc6b-4d36-be23-7c8682cfae8c
mon_initial_members = master, node001
mon_host = 192.168.56.105,192.168.56.106
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
osd_pool_default_size = 2
[mon]
mon_allow_pool_delete = true
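#After editing ceph.conf, push it to the nodes and restart the mons so mon_allow_pool_delete takes effect (a sketch):
ceph-deploy --overwrite-conf config push master node001
systemctl restart ceph-mon.target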
[root@master ~]# kubectl logs cephfs-provisioner-7799f97d57-fw8bf -n kube-system -f
I0305 11:36:41.341274       1 controller.go:1149] provision "kube-system/claim" class "cephfs": succeeded
I0305 11:36:41.343087       1 event.go:221] Event(v1.ObjectReference{Kind:"PersistentVolumeClaim", Namespace:"kube-system", Name:"claim", UID:"a2d6f80d-22a5-42ab-b51b-7d3a5bfa2425", APIVersion:"v1", ResourceVersion:"330037", FieldPath:""}): type: 'Normal' reason: 'ProvisioningSucceeded' Successfully provisioned volume pvc-a2d6f80d-22a5-42ab-b51b-7d3a5bfa2425
^C
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: cephfs
provisioner: ceph.com/cephfs
parameters:
  monitors: 192.168.56.105:6789,192.168.56.106:6789
  adminId: admin
  adminSecretName: ceph-secret-admin
  adminSecretNamespace: kube-system
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: claim
  namespace: kube-system
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: cephfs
  resources:
    requests:
      storage: 10Mi
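To exercise the claim end to end, a throwaway pod can mount the PVC (the pod name and busybox image here are illustrative placeholders, not from the original setup):
apiVersion: v1
kind: Pod
metadata:
  name: cephfs-test-pod
  namespace: kube-system
spec:
  containers:
  - name: app
    image: busybox
    command: ["sh", "-c", "echo hello > /data/hello && sleep 3600"]
    volumeMounts:
    - name: data
      mountPath: /data
  volumes:
  - name: data
    persistentVolumeClaim:
      claimName: claim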
[root@master ceph-step]# kubectl get pvc -n kube-system
NAME    STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
claim   Bound    pvc-a2d6f80d-22a5-42ab-b51b-7d3a5bfa2425   10Mi       RWX            cephfs         26s