K8s (RBD Dynamic Storage) with Ceph 14.2.13 Nautilus, Continued (1)
First, create an RBD image in the Ceph pool named k8s:
[root@ceph-node01 ceph-deploy]# rbd create -p k8s --image rbd-demo2.img --size 2G
Check it:
[root@ceph-node01 ceph-deploy]# rbd info k8s/rbd-demo2.img
rbd image 'rbd-demo2.img':
        size 4 GiB in 1024 objects
        order 22 (4 MiB objects)
        snapshot_count: 0
        id: 3a8f453eae84
        block_name_prefix: rbd_data.3a8f453eae84
        format: 2
        features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
        op_features:
        flags:
        create_timestamp: Wed Nov 11 23:48:45 2020
        access_timestamp: Wed Nov 11 23:48:45 2020
        modify_timestamp: Wed Nov 11 23:48:45 2020
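For completeness: if the k8s pool had not been created beforehand, it would be created and tagged for RBD roughly like this (the PG count of 64 is an assumption for this small 3-OSD cluster); enabling the application also clears the "application not enabled on 1 pool(s)" warning that shows up in ceph -s below:
ceph osd pool create k8s 64 64
ceph osd pool application enable k8s rbd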
Create a Ceph user and key for Kubernetes:
[root@ceph-node01 ceph-deploy]# ceph auth get-or-create client.kube mon 'allow r' osd 'allow class-read object_prefix rbd_children,allow rwx pool=k8s'
[client.kube]
key = AQCoCaxfomqKCBAA7htV95TZtECWZOsOH5dnCA==
[root@ceph-node01 ceph-deploy]# ceph auth get-key client.admin | base64
QVFCcndLdGZtMCtaT2hBQWFZMVpZdlJZVEhXbE5TNS82SmlVY0E9PQ==
[root@ceph-node01 ceph-deploy]# ceph auth get-key client.kube | base64
QVFDb0NheGZvbXFLQ0JBQTdodFY5NVRadEVDV1pPc09INWRuQ0E9PQ==
Install the ceph-common package on every k8s node so the kubelet can map RBD images:
yum -y install ceph-common
Copy the keyring and config to every k8s node:
scp -r ceph.client.admin.keyring ceph.conf root@192.168.8.111:/etc/ceph/
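With several nodes, a small loop saves repetition (the node list below is a placeholder; substitute the actual k8s node IPs):
for node in 192.168.8.111 192.168.8.112 192.168.8.113; do
  scp -r ceph.client.admin.keyring ceph.conf root@${node}:/etc/ceph/
done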
From any k8s node you can now query the Ceph cluster:
[root@cc111 ceph]# ceph auth get-or-create client.kube
[client.kube]
key = AQCoCaxfomqKCBAA7htV95TZtECWZOsOH5dnCA==
[root@cc111 ceph]# cat /etc/ceph/ceph.client.admin.keyring
[client.admin]
key = AQBrwKtfm0+ZOhAAaY1ZYvRYTHWlNS5/6JiUcA==
caps mds = "allow *"
caps mgr = "allow *"
caps mon = "allow *"
caps osd = "allow *"
----------------------------------------------------------------
[root@cc111 ceph]# ceph -s
  cluster:
    id:     8cba000d-bafe-4d00-839a-663d6086052e
    health: HEALTH_WARN
            application not enabled on 1 pool(s)

  services:
    mon: 3 daemons, quorum ceph-node01,ceph-node02,ceph-node03 (age 2h)
    mgr: ceph-node01(active, since 8h), standbys: ceph-node02, ceph-node03
    osd: 3 osds: 3 up (since 7h), 3 in (since 7h)

  data:
    pools:   2 pools, 128 pgs
    objects: 37 objects, 71 MiB
    usage:   3.2 GiB used, 5.8 GiB / 9.0 GiB avail
    pgs:     128 active+clean
The k8s Secret manifests:
[root@cc110 ceph]# cat ceph-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: ceph-admin-secret
  namespace: default
data:
  key: QVFCcndLdGZtMCtaT2hBQWFZMVpZdlJZVEhXbE5TNS82SmlVY0E9PQ==
  # ceph auth get-key client.admin | base64
type: "kubernetes.io/rbd"
---
apiVersion: v1
kind: Secret
metadata:
  name: ceph-kube-secret
  namespace: default
data:
  key: QVFDb0NheGZvbXFLQ0JBQTdodFY5NVRadEVDV1pPc09INWRuQ0E9PQ==
  # ceph auth get-or-create client.kube mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=k8s'
  # ceph auth get-key client.kube | base64
type: "kubernetes.io/rbd"
Note: the community (out-of-tree) RBD provisioner must be deployed, otherwise PVs cannot be provisioned and mounted.
https://github.com/kubernetes-retired/external-storage/tree/master/ceph/rbd/deploy/rbac
# RBAC manifests
[root@cc110 ceph]# ll rbac-rdb/
total 24
-rw-r--r-- 1 root root 275 Nov 12 02:36 clusterrolebinding.yaml
-rw-r--r-- 1 root root 743 Nov 12 02:33 clusterrole.yaml
-rw-r--r-- 1 root root 484 Nov 12 02:34 deployment.yaml
-rw-r--r-- 1 root root 255 Nov 12 02:36 rolebinding.yaml
-rw-r--r-- 1 root root 260 Nov 12 02:34 role.yaml
-rw-r--r-- 1 root root  70 Nov 12 02:35 serviceaccount.yaml
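The deployment.yaml here comes from the linked community repo; it essentially runs the rbd-provisioner image under the rbd-provisioner ServiceAccount with the provisioner name set to ceph.com/rbd. A rough sketch of what it contains (check the repo for the authoritative manifest):
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rbd-provisioner
spec:
  replicas: 1
  selector:
    matchLabels:
      app: rbd-provisioner
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: rbd-provisioner
    spec:
      serviceAccountName: rbd-provisioner
      containers:
      - name: rbd-provisioner
        image: quay.io/external_storage/rbd-provisioner:latest
        env:
        - name: PROVISIONER_NAME
          value: ceph.com/rbd   # must match the provisioner field in the StorageClass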
------------------------------------------------------------------------------------------------
[root@cc110 rbac-rdb]# NAMESPACE=default
[root@cc110 rbac-rdb]# sed -r -i "s/namespace: [^ ]+/namespace: $NAMESPACE/g" ./clusterrolebinding.yaml ./rolebinding.yaml
[root@cc110 rbac-rdb]# kubectl apply -f .
clusterrole.rbac.authorization.k8s.io/rbd-provisioner created
clusterrolebinding.rbac.authorization.k8s.io/rbd-provisioner created
deployment.apps/rbd-provisioner created
role.rbac.authorization.k8s.io/rbd-provisioner created
rolebinding.rbac.authorization.k8s.io/rbd-provisioner created
serviceaccount/rbd-provisioner created
The provisioner started successfully:
[root@cc110 ceph]# kubectl get pods | grep rbd
rbd-provisioner-76f6bc6669-xlqfv 1/1 Running 0 28m
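If a PVC gets stuck in Pending later on, the provisioner's log is the first place to look:
kubectl logs deploy/rbd-provisioner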
See the community repo for reference:
https://github.com/kubernetes-retired/external-storage/tree/master/ceph/rbd/deploy
Create the StorageClass:
[root@cc110 ceph]# cat sc.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-rbd
  annotations:
    storageclass.beta.kubernetes.io/is-default-class: "true"
#provisioner: kubernetes.io/rbd
provisioner: ceph.com/rbd
parameters:
  monitors: 192.168.8.100:6789,192.168.8.102:6789,192.168.8.101:6789
  adminId: admin
  adminSecretName: ceph-admin-secret
  adminSecretNamespace: default
  pool: k8s
  userId: kube
  userSecretName: ceph-kube-secret
  userSecretNamespace: default
  fsType: ext4
  imageFormat: "2"
  imageFeatures: "layering"
#reclaimPolicy: Retain
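Apply it and confirm it shows up; the is-default-class annotation makes it the cluster's default StorageClass, so PVCs without an explicit class will use it:
kubectl apply -f sc.yaml
kubectl get sc ceph-rbd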
Then create a PVC to bind against it:
[root@cc110 ceph]# cat pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ceph-rbd-claim
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 2Gi
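Apply it and watch it bind. Every Bound claim corresponds to an image the provisioner created in the k8s pool, which can be confirmed from a Ceph node:
kubectl apply -f pvc.yaml
kubectl get pvc ceph-rbd-claim
rbd ls -p k8s    # run on a Ceph node; the dynamically provisioned image appears here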
Create a Pod to test the volume:
[root@cc110 ceph]# cat nginx-tres.yaml
apiVersion: v1
kind: Pod
metadata:
  name: ceph-pod1
spec:
#  nodeName: k8s-node02
  containers:
  - name: nginx
    image: nginx:alpine
    volumeMounts:
    - name: ceph-rdb-vol1
      mountPath: /usr/share/nginx/html
      readOnly: false
  volumes:
  - name: ceph-rdb-vol1
    persistentVolumeClaim:
      claimName: ceph-rbd-claim
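Start the Pod and drop a file into the mounted volume to confirm writes work (the file name and content here are just an example, matching the 123.txt that shows up in the listing below):
kubectl apply -f nginx-tres.yaml
kubectl exec ceph-pod1 -- sh -c 'echo hello-rbd > /usr/share/nginx/html/123.txt'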
Both reads and writes work:
[root@cc110 ceph]# kubectl exec ceph-pod1 -- df -h
Filesystem Size Used Available Use% Mounted on
overlay 33.2G 2.9G 30.2G 9% /
tmpfs 64.0M 0 64.0M 0% /dev
tmpfs 1.8G 0 1.8G 0% /sys/fs/cgroup
/dev/sda3 33.2G 2.9G 30.2G 9% /dev/termination-log
/dev/sda3 33.2G 2.9G 30.2G 9% /etc/resolv.conf
/dev/sda3 33.2G 2.9G 30.2G 9% /etc/hostname
/dev/sda3 33.2G 2.9G 30.2G 9% /etc/hosts
shm 64.0M 0 64.0M 0% /dev/shm
/dev/rbd0 1.9G 6.0M 1.9G 0% /usr/share/nginx/html
tmpfs 1.8G 12.0K 1.8G 0% /run/secrets/kubernetes.io/serviceaccount
tmpfs 1.8G 0 1.8G 0% /proc/acpi
tmpfs 64.0M 0 64.0M 0% /proc/kcore
tmpfs 64.0M 0 64.0M 0% /proc/keys
tmpfs 64.0M 0 64.0M 0% /proc/timer_list
tmpfs 64.0M 0 64.0M 0% /proc/timer_stats
tmpfs 64.0M 0 64.0M 0% /proc/sched_debug
tmpfs 1.8G 0 1.8G 0% /proc/scsi
tmpfs 1.8G 0 1.8G 0% /sys/firmware
[root@cc110 ceph]# kubectl exec ceph-pod1 -- ls /usr/share/nginx/html
123.txt
lost+found
StatefulSet controller test
Only the StorageClass needs to exist; the volumeClaimTemplates take care of the PVCs:
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-rbd
  annotations:
    storageclass.beta.kubernetes.io/is-default-class: "true"
#provisioner: kubernetes.io/rbd
provisioner: ceph.com/rbd
parameters:
  monitors: 192.168.8.100:6789,192.168.8.102:6789,192.168.8.101:6789
  adminId: admin
  adminSecretName: ceph-admin-secret
  adminSecretNamespace: default
  pool: k8s
  userId: kube
  userSecretName: ceph-kube-secret
  userSecretNamespace: default
  fsType: xfs
  imageFormat: "2"
  imageFeatures: "layering"
#reclaimPolicy: Retain
kubectl apply -f test-nginx.yaml
[root@cc110 ceph]# cat test-nginx.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  selector:
    matchLabels:
      app: nginx # has to match .spec.template.metadata.labels
  serviceName: "nginx"
  replicas: 3 # by default is 1
  template:
    metadata:
      labels:
        app: nginx # has to match .spec.selector.matchLabels
    spec:
      containers:
      - name: nginx
        image: nginx:alpine
        ports:
        - containerPort: 80
          name: web
        volumeMounts:
        - name: www
          mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
  - metadata:
      name: www
      annotations:
        volume.beta.kubernetes.io/storage-class: "ceph-rbd"   # <<<--- just reference the StorageClass name here
        #ceph.com/storage-class: "ceph-rbd-claim"
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 1Gi
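On newer Kubernetes releases the beta annotation can be replaced by the storageClassName field in the claim template's spec; the effect is the same:
volumeClaimTemplates:
- metadata:
    name: www
  spec:
    storageClassName: ceph-rbd
    accessModes: [ "ReadWriteOnce" ]
    resources:
      requests:
        storage: 1Gi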
After creation, the PVs and PVCs are bound automatically:
[root@cc110 ceph]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pvc-7c63afe2-288c-43f9-9f55-67b69a3604be 1Gi RWO Delete Bound default/www-web-0 ceph-rbd 143m
pvc-82d7e36f-3976-4192-b6f2-63e45a1d615c 1Gi RWO Delete Bound default/www-web-1 ceph-rbd 143m
pvc-c50ddec9-8ae1-455c-b5a5-cc941190e718 1Gi RWO Delete Bound default/www-web-2 ceph-rbd 142m
[root@cc110 ceph]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
www-web-0 Bound pvc-7c63afe2-288c-43f9-9f55-67b69a3604be 1Gi RWO ceph-rbd 143m
www-web-1 Bound pvc-82d7e36f-3976-4192-b6f2-63e45a1d615c 1Gi RWO ceph-rbd 143m
www-web-2 Bound pvc-c50ddec9-8ae1-455c-b5a5-cc941190e718 1Gi RWO ceph-rbd 142m
Finally, check that all Pods are Running:
[root@cc110 ceph]# kubectl get pods | grep web
web-0 1/1 Running 0 142m
web-1 1/1 Running 0 142m
web-2 1/1 Running 0 142m
The next post will cover building the Ceph cluster.