1. Create an RBD pool, an image, and a regular user in the Ceph cluster

1.1 Pool and image configuration

Create the storage pool

root@u20-deploy:~# ceph osd pool create rbd-test-pool1 32 32

Enable the rbd application on the newly created pool

root@u20-deploy:~# ceph osd pool application enable rbd-test-pool1 rbd
enabled application 'rbd' on pool 'rbd-test-pool1'

Initialize the pool

root@u20-deploy:~# rbd pool init -p rbd-test-pool1

Create an image

rbd create ceph-img01 --size 5G --pool rbd-test-pool1 --image-format 2 --image-feature layering 
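
The features are limited to layering because older kernel RBD clients cannot map images that use newer features. If an image was created with the default feature set and mapping fails, the extra features can be disabled afterwards; a sketch:

rbd feature disable rbd-test-pool1/ceph-img01 object-map fast-diff deep-flatten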

List the images

(Each image is mounted by a single Pod. RBD volumes are mainly intended for StatefulSet workloads such as MySQL or Redis master/replica clusters; stateless applications such as nginx, whose code directory needs to be mounted and shared across many Pods, are usually served by CephFS instead.)

root@u20-deploy:~# rbd ls --pool rbd-test-pool1
ceph-img01

Verify the image details

root@u20-deploy:~# rbd --image ceph-img01 --pool rbd-test-pool1 info
rbd image 'ceph-img01':
        size 5 GiB in 1280 objects
        order 22 (4 MiB objects)
        snapshot_count: 0
        id: 11a774b295af
        block_name_prefix: rbd_data.11a774b295af
        format: 2
        features: layering
        op_features: 
        flags: 
        create_timestamp: Wed Oct 20 16:20:37 2021
        access_timestamp: Wed Oct 20 16:20:37 2021
        modify_timestamp: Wed Oct 20 16:20:37 2021

1.2 Install ceph-common on the k8s master and node hosts

# Add the Ceph release key
root@k8-master1:~# wget -q -O- 'https://download.ceph.com/keys/release.asc' |apt-key add -
OK

# Configure the APT sources
root@k8-node1:~# cat /etc/apt/sources.list
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-updates main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-updates main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-backports main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-backports main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-security main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-security main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ceph/debian-pacific focal main

# Install
root@k8-master1:~# apt update && apt install ceph-common -y
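
Ubuntu's own archive also ships ceph-common, so a quick version check confirms the client actually came from the pacific repository added above:

ceph --version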

1.3 Create a regular user and grant permissions

Create and authorize a regular Ceph user

root@u20-deploy:~# ceph auth get-or-create client.ceph-user01 mon 'allow r' osd 'allow * pool=rbd-test-pool1'
[client.ceph-user01]
        key = AQDu4G9hSNPLChAAr1uvWsPBqLWYMpj3srLojQ==
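
The osd cap above grants the user full access ('allow *') to the pool. A tighter alternative would have been to create the user with Ceph's built-in rbd profiles; shown only as a sketch, the rest of this walkthrough keeps the broader caps:

ceph auth get-or-create client.ceph-user01 mon 'profile rbd' osd 'profile rbd pool=rbd-test-pool1'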

Verify the permissions

root@u20-deploy:~# ceph auth get client.ceph-user01
exported keyring for client.ceph-user01
[client.ceph-user01]
        key = AQDu4G9hSNPLChAAr1uvWsPBqLWYMpj3srLojQ==
        caps mon = "allow r"
        caps osd = "allow * pool=rbd-test-pool1"

Export the user's credentials to a keyring file

root@u20-deploy:~# ceph auth get client.ceph-user01 -o ceph.client.ceph-user01.keyring
exported keyring for client.ceph-user01

root@u20-deploy:~# cat ceph.client.ceph-user01.keyring 
[client.ceph-user01]
        key = AQDu4G9hSNPLChAAr1uvWsPBqLWYMpj3srLojQ==
        caps mon = "allow r"
        caps osd = "allow * pool=rbd-test-pool1"

There are two ways for Kubernetes to mount RBD volumes provided by Ceph:

1. Mount RBD using the keyring file on the host (requires copying /etc/ceph/ceph.conf and ceph.client.ceph-user01.keyring to every node).
2. Define the key from the keyring as a Kubernetes Secret, and have the Pod mount RBD through that Secret.

Keyring-file authentication:

Copy /etc/ceph/ceph.conf and ceph.client.ceph-user01.keyring to every k8s master and node (ceph-common must already be installed on all of them):

scp /etc/ceph/ceph.conf ceph.client.ceph-user01.keyring 192.168.2.11:/etc/ceph
scp /etc/ceph/ceph.conf ceph.client.ceph-user01.keyring 192.168.2.12:/etc/ceph
scp /etc/ceph/ceph.conf ceph.client.ceph-user01.keyring 192.168.2.13:/etc/ceph
scp /etc/ceph/ceph.conf ceph.client.ceph-user01.keyring 192.168.2.17:/etc/ceph
scp /etc/ceph/ceph.conf ceph.client.ceph-user01.keyring 192.168.2.18:/etc/ceph
scp /etc/ceph/ceph.conf ceph.client.ceph-user01.keyring 192.168.2.19:/etc/ceph
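
Equivalently, as a small loop (a sketch, assuming root SSH access to all six hosts):

for h in 192.168.2.11 192.168.2.12 192.168.2.13 192.168.2.17 192.168.2.18 192.168.2.19; do
  scp /etc/ceph/ceph.conf ceph.client.ceph-user01.keyring ${h}:/etc/ceph/
done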

Log in to a k8s node and check, as the newly created user, whether the Ceph cluster status can be queried.

root@k8-node1:~# ceph --user ceph-user01 -s
  cluster:
    id:     6618d203-a34a-4339-876a-9a9cee0b0ed3
    health: HEALTH_OK
 
  services:
    mon: 2 daemons, quorum u20-mon1,u20-mon2 (age 3w)
    mgr: u20-mgr1(active, since 6w), standbys: u20-mgr2
    osd: 9 osds: 9 up (since 6w), 9 in (since 6w)
 
  data:
    pools:   2 pools, 33 pgs
    objects: 4 objects, 35 B
    usage:   167 MiB used, 1.8 TiB / 1.8 TiB avail
    pgs:     33 active+clean

On the k8s node, verify that the image in the newly created pool is visible; if it is, the permissions are fine.

root@k8-node1:~# rbd --id ceph-user01 ls --pool=rbd-test-pool1
ceph-img01

2. Examples of using Ceph RBD in k8s

2.1 Add hosts entries for the Ceph cluster on every k8s master and node

vim /etc/hosts
192.168.2.71 ceph-node01
192.168.2.72 ceph-node02
192.168.2.73 ceph-node03
192.168.2.74 u20-mon1
192.168.2.75 u20-mon2
192.168.2.76 u20-mgr1
192.168.2.77 u20-mgr2
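
A quick resolution check from any k8s node (a sketch):

getent hosts u20-mon1 u20-mon2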

2.2 Using RBD in a Pod via the keyring file

2.2.1 Pod YAML

# vi case1-busybox-keyring.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - image: busybox
    command:
      - sleep
      - "3600"
    imagePullPolicy: Always
    name: busybox
    #restartPolicy: Always
    volumeMounts:
    - name: rbd-data1
      mountPath: /data
  volumes:
    - name: rbd-data1
      rbd:
        monitors:
        - '192.168.2.74:6789'
        - '192.168.2.75:6789'
        pool: rbd-test-pool1
        image: ceph-img01
        fsType: xfs
        readOnly: false
        user: ceph-user01
        keyring: /etc/ceph/ceph.client.ceph-user01.keyring 

2.2.2 Create the Pod

# kubectl apply -f case1-busybox-keyring.yaml
pod/busybox created

2.2.3 Verify the Pod was created

# kubectl describe pod busybox
...
Events:
  Type    Reason                  Age   From                     Message
  ----    ------                  ----  ----                     -------
  Normal  Scheduled               28s   default-scheduler        Successfully assigned default/busybox to 192.168.2.17
  Normal  SuccessfulAttachVolume  28s   attachdetach-controller  AttachVolume.Attach succeeded for volume "rbd-data1"

2.2.4 Check the RBD mount inside the Pod and test writing a file

kubectl exec busybox -it -- sh
/ # df -h
Filesystem                Size      Used Available Use% Mounted on
...
/dev/rbd0                 5.0G     68.1M      4.9G   1% /data
...

/ # cd /data/
/data # echo 'ceph rbd write test v1' > test

2.2.5 Check the RBD mount on the node hosting the Pod (the Pod actually mounts through the host's kernel)

root@k8-node1:~# df -h
Filesystem      Size  Used Avail Use% Mounted on
...
/dev/rbd0       5.0G   69M  5.0G    2% /var/lib/kubelet/plugins/kubernetes.io/rbd/mounts/rbd-test-pool1-image-ceph-img01
...

2.3 Mounting RBD in a Deployment's Pods

2.3.1 YAML

vim case2-nginx-keyring.yaml              
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels: #rs or deployment
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx:1.21.1
        ports:
        - containerPort: 80

        volumeMounts:
        - name: rbd-data1
          mountPath: /data
      volumes:
        - name: rbd-data1
          rbd:
            monitors:
            - '192.168.2.74:6789'
            - '192.168.2.75:6789'
            pool: rbd-test-pool1
            image: ceph-img01
            fsType: xfs
            readOnly: false
            user: ceph-user01
            keyring: /etc/ceph/ceph.client.ceph-user01.keyring

2.3.2 Create the Deployment

# kubectl apply -f case2-nginx-keyring.yaml 
deployment.apps/nginx-deployment created

2.3.3 Check the mount inside the Pod

root@k8-deploy:~/k8s-yaml/ceph-case# kubectl exec nginx-deployment-67cb7d5bcc-xrfss -it -- sh
# df -h
...
/dev/rbd0       5.0G   69M  5.0G   2% /data
...

root@k8-deploy:~/k8s-yaml/ceph-case# kubectl exec nginx-deployment-67cb7d5bcc-xrfss -it -- bash
root@nginx-deployment-67cb7d5bcc-xrfss:/# cat /data/test 
ceph rbd write test v1

2.3.4 Check the RBD mapping on the node hosting the Pod

root@k8-node1:~# df -h |grep rbd
/dev/rbd0       5.0G   69M  5.0G    2% /var/lib/kubelet/plugins/kubernetes.io/rbd/mounts/rbd-test-pool1-image-ceph-img01

root@k8-node1:~# rbd showmapped
id  pool            namespace  image       snap  device   
0   rbd-test-pool1             ceph-img01  -     /dev/rbd0
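
From the Ceph side, the image's watcher shows which client currently has it mapped (a sketch, run on the deploy node):

rbd status rbd-test-pool1/ceph-img01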

2.4 Mounting RBD in k8s via a Secret

The key is first defined as a Secret and then referenced by the Pod, so the k8s nodes no longer need to keep the keyring file.

2.4.1 Create the Secret

# Check the regular Ceph user's key
root@k8-node1:~# cat /etc/ceph/ceph.client.ceph-user01.keyring 
[client.ceph-user01]
        key = AQDu4G9hSNPLChAAr1usPBqLWYMpj3srLojQ==
        caps mon = "allow r"
        caps osd = "allow * pool=rbd-test-pool1"
# Base64-encode the key
root@k8-node1:~# echo AQDu4G9hSNPLAAr1uvWsPBqLWYMpj3srLojQ== |base64
QVFEdTRHOWhTTlBMQ2hBQXIxdXZXc1BCcUx1wajNzckxvalE9PQo=
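
One caveat: plain echo appends a trailing newline, which ends up inside the decoded key and may cause authentication failures when the Secret is used. Safer sketches (the second form needs to run where an admin keyring is available, e.g. the deploy node):

# -n suppresses the trailing newline; <key> stands for the real key string
echo -n '<key>' | base64
# or read the key straight from the cluster
ceph auth get-key client.ceph-user01 | base64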

# Secret YAML
# vim case3-secret-client-shijie.yaml 
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret-ceph-user01
type: "kubernetes.io/rbd"
data:
  key: QVFEdTRHOWhTTlBMQ2hBQXXZXc1BCcUxXWU1wajNzckxvalE9PQo=

# Create the Secret and check it
root@k8-deploy:~/k8s-yaml/ceph-case# kubectl apply -f case3-secret-client-shijie.yaml 
secret/ceph-secret-ceph-user01 created

root@k8-deploy:~/k8s-yaml/ceph-case# kubectl get secret
NAME                      TYPE                                  DATA   AGE
ceph-secret-ceph-user01   kubernetes.io/rbd                     1      6s

2.4.2 Pods in a Deployment mount RBD via the Secret

Unlike the keyring-file method, the Secret method references the name of the Secret created above.

# deployment yaml
# vim case4-nginx-secret.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels: #rs or deployment
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx:1.21.1
        ports:
        - containerPort: 80

        volumeMounts:
        - name: rbd-data1
          mountPath: /data
      volumes:
        - name: rbd-data1
          rbd:
            monitors:
            - '192.168.2.74:6789'
            - '192.168.2.75:6789'
            pool: rbd-test-pool1
            image: ceph-img01
            fsType: xfs
            readOnly: false
            user: ceph-user01
            secretRef:
              name: ceph-secret-ceph-user01

# Create the Deployment
root@k8-deploy:~/k8s-yaml/ceph-case# kubectl apply -f case4-nginx-secret.yaml 
deployment.apps/nginx-deployment created

# Verify the mount inside the Pod
root@k8-deploy:~/k8s-yaml/ceph-case# kubectl get pod -o wide
NAME                                              READY   STATUS    RESTARTS   AGE    IP               NODE           NOMINATED NODE   READINESS GATES
alpine-nginx1.80-yun-deployment-d4b48cc74-88s6r   1/1     Running   0          2d7h   10.100.112.58    192.168.2.19   <none>           <none>
nginx-deployment-77b65f68c6-jdpxb                 1/1     Running   0          11s    10.100.172.210   192.168.2.17   <none>           <none>

root@k8-deploy:~/k8s-yaml/ceph-case# kubectl exec nginx-deployment-77b65f68c6-jdpxb -it -- bash

root@nginx-deployment-77b65f68c6-jdpxb:/# df -h |grep rbd
/dev/rbd0       5.0G   69M  5.0G   2% /data

2.5 Mounting with a dynamic StorageClass and PV/PVC

k8s master (using the ceph admin account) -> creates the PV (RBD image) in the Ceph cluster -> the k8s cluster binds it to the PVC
k8s master (using the regular user ceph-user01) -> performs the mount

Create a Secret for the ceph admin account in the k8s cluster

# Check the admin account's key in the Ceph cluster
root@u20-deploy:/etc/ceph# cat ceph.client.admin.keyring 
[client.admin]
        key = AQBK9TVhfewHABAAkMsNahi635RxY6vZP6g==
        caps mds = "allow *"
        caps mgr = "allow *"
        caps mon = "allow *"
        caps osd = "allow *"

# Base64-encode it
root@u20-deploy:/etc/ceph# echo AQBK9TVhfewAAkPMNahi63R05RxY6vZP6g== |base64
QVFCSzlUVmhXdIQUJBQWtQTXNOYWhpNjNSMDVk2dlpQNmc9PQo=

# Admin Secret YAML
vim case5-secret-admin.yaml 
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret-admin
type: "kubernetes.io/rbd"
data:
  key: QVFCSzlUVmhXdIQUJBQWtQTXNOYWhpNjNSMDVk2dlpQNmc9PQo=

# Create the admin Secret
root@k8-deploy:~/k8s-yaml/ceph-case# kubectl apply -f case5-secret-admin.yaml 
secret/ceph-secret-admin created

# Verify it was created
root@k8-deploy:~/k8s-yaml/ceph-case# kubectl get secret
NAME                      TYPE                                  DATA   AGE
ceph-secret-admin         kubernetes.io/rbd                     1      8s
ceph-secret-ceph-user01   kubernetes.io/rbd                     1      26m
default-token-5ddfn       kubernetes.io/service-account-token   3      32d

Define the StorageClass YAML

# vim case6-ceph-storage-class.yaml 
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-storage-class-test01
  annotations:
    storageclass.kubernetes.io/is-default-class: "true" # set as the default StorageClass
provisioner: kubernetes.io/rbd
parameters:
  monitors: 192.168.2.74:6789,192.168.2.75:6789
  adminId: admin
  adminSecretName: ceph-secret-admin
  adminSecretNamespace: default
  pool: rbd-test-pool1
  userId: ceph-user01
  userSecretName: ceph-secret-ceph-user01
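
Two caveats with the in-tree kubernetes.io/rbd provisioner: it runs inside kube-controller-manager, so the rbd client must be reachable from that process (one reason ceph-common was installed on the masters in 1.2; a containerized controller-manager would need an external RBD provisioner instead), and userSecretName is looked up in the namespace of each PVC unless userSecretNamespace is set. A quick check on a master (a sketch):

which rbd && rbd --version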

Define the PVC YAML

# vim case7-mysql-pvc.yaml 
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-data-pvc
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: ceph-storage-class-test01
  resources:
    requests:
      storage: '5Gi'

Create the StorageClass and the PVC

root@k8-deploy:~/k8s-yaml/ceph-case# kubectl apply -f case6-ceph-storage-class.yaml 
storageclass.storage.k8s.io/ceph-storage-class-test01 created

root@k8-deploy:~/k8s-yaml/ceph-case# kubectl apply -f case7-mysql-pvc.yaml 
persistentvolumeclaim/mysql-data-pvc created

Verify the creation succeeded

# pv
root@k8-deploy:~/k8s-yaml/ceph-case# kubectl get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                           STORAGECLASS                REASON   AGE
pvc-4d226ff2-b3cd-4106-9b83-fdcc2c289dc0   5Gi        RWO            Delete           Bound    default/mysql-data-pvc          ceph-storage-class-test01            5s

# pvc
root@k8-deploy:~/k8s-yaml/ceph-case# kubectl get pvc
NAME             STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS                AGE
mysql-data-pvc   Bound    pvc-4d226ff2-b3cd-4106-9b83-fdcc2c289dc0   5Gi        RWO            ceph-storage-class-test01   7m16s

# storageclass
root@k8-deploy:~/k8s-yaml/ceph-case# kubectl get storageclasses -A
NAME                                  PROVISIONER         RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
ceph-storage-class-test01 (default)   kubernetes.io/rbd   Delete          Immediate           false                  113s

Check the dynamically created image in the Ceph cluster

root@u20-deploy:/etc/ceph# rbd ls --pool rbd-test-pool1
kubernetes-dynamic-pvc-c802c8c4-7336-4acb-aac3-3eb66b3fb225
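
The dynamically provisioned image can be inspected like any other, which also confirms the 5 GiB size requested by the PVC:

rbd info rbd-test-pool1/kubernetes-dynamic-pvc-c802c8c4-7336-4acb-aac3-3eb66b3fb225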

Run a single-instance MySQL service on the dynamically provisioned PVC

# vim case8-mysql-single.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql
spec:
  selector:
    matchLabels:
      app: mysql
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
      - image: 192.168.1.110/base/mysql:5.6.46      
        name: mysql
        env:
          # Use secret in real usage
        - name: MYSQL_ROOT_PASSWORD
          value: magedu123456
        ports:
        - containerPort: 3306
          name: mysql
        volumeMounts:
        - name: mysql-persistent-storage
          mountPath: /var/lib/mysql
      volumes:
      - name: mysql-persistent-storage
        persistentVolumeClaim:
          claimName: mysql-data-pvc


---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: mysql-service-label
  name: mysql-service
spec:
  type: NodePort
  ports:
  - name: http
    port: 3306
    protocol: TCP
    targetPort: 3306
    nodePort: 43306
  selector:
    app: mysql

Check whether the MySQL Pod was created successfully

root@k8-deploy:~/k8s-yaml/ceph-case# kubectl get pod
NAME                                              READY   STATUS    RESTARTS   AGE
mysql-7c6df98dc5-r29z6                            1/1     Running   0          30s

Check the mount inside the Pod

root@k8-deploy:~/k8s-yaml/ceph-case# kubectl exec mysql-7c6df98dc5-r29z6 -it -- bash

root@mysql-7c6df98dc5-r29z6:/# df -h |grep rbd
/dev/rbd0       4.9G  136M  4.8G   3% /var/lib/mysql
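
The NodePort Service can also be checked end to end; a sketch, assuming a mysql client on the test host and that the apiserver's --service-node-port-range has been extended to cover 43306:

mysql -h 192.168.2.17 -P 43306 -uroot -pmagedu123456 -e 'select version();'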

3. Mounting CephFS in k8s via a Secret

3.1 Configure CephFS in the Ceph cluster

For deploying and configuring CephFS on the Ceph cluster, see: "Ubuntu 18.04.5 LTS Ceph集群之 cephx 认证及使用普通用户挂载RBD和CephFS" (yanql, 博客园).

3.2 Configure the Secret in the k8s cluster

Look up the admin account's key in the Ceph cluster and base64-encode it

root@ceph-deploy:~# cat /etc/ceph/ceph.client.admin.keyring 
[client.admin]
        key = AQDlEidha1GfKhAAd+00Td2uHDbYtggYNanw==
        caps mds = "allow *"
        caps mgr = "allow *"
        caps mon = "allow *"
        caps osd = "allow *"

root@ceph-deploy:~# echo AQDlEidha1GfKhd+00TzUd2uHDbYtggYNanw== |base64
QVFEbEVpZGhhMUdmS2hBQWQrMDBUelVkMnVIRGdGdnWU5hbnc9PQo=

Create the admin Secret in the k8s cluster

# admin secret YAML
root@k8-deploy:~/k8s-yaml/ceph-case# cat case5-secret-admin.yaml 
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret-admin01
type: "kubernetes.io/rbd"
data:
  key: QVFEbEVpZGhhMUdmS2hBQWQrMDBUelVkMnVIRGdGdnWU5hbnc9PQo=

# Create the Secret from the YAML
kubectl apply -f case5-secret-admin.yaml

# Verify it was created
root@k8-deploy:~/k8s-yaml/ceph-case# kubectl get secrets 
NAME                      TYPE                                  DATA   AGE
ceph-secret-admin01       kubernetes.io/rbd                     1      46m
ceph-secret-ceph-user02   kubernetes.io/rbd                     1      46m
default-token-5ddfn       kubernetes.io/service-account-token   3      32d

3.3 Test mounting CephFS in the k8s cluster

Copy the ceph admin keyring file to the k8s cluster's master and node hosts

scp ceph.client.admin.keyring 192.168.2.11:/etc/ceph/
scp ceph.client.admin.keyring 192.168.2.12:/etc/ceph/
scp ceph.client.admin.keyring 192.168.2.13:/etc/ceph/
scp ceph.client.admin.keyring 192.168.2.17:/etc/ceph/
scp ceph.client.admin.keyring 192.168.2.18:/etc/ceph/
scp ceph.client.admin.keyring 192.168.2.19:/etc/ceph/

Write a test Service and Deployment YAML that uses the nginx image to mount CephFS.

# Deployment YAML
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 9
  selector:
    matchLabels: #rs or deployment
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx:1.21.1
        ports:
        - containerPort: 80

        volumeMounts:
        - name: ceph-fs-t1
          mountPath: /usr/share/nginx/html/ 
      volumes:
        - name: ceph-fs-t1
          cephfs:
            monitors:
            - '192.168.2.21:6789'
            - '192.168.2.22:6789'
            - '192.168.2.23:6789'
            path: /
            user: admin
            secretRef:
              name: ceph-secret-admin01

# Service YAML
kind: Service
apiVersion: v1
metadata:
  labels:
    app: nginx-service
  name: nginx-service
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
    nodePort: 30081
  selector:
    app: ng-deploy-80

Create the Service and Deployment from the YAML

root@k8-deploy:~/k8s-yaml/ceph-case# kubectl apply -f case9-nginx-cephfs.yaml 
deployment.apps/nginx-deployment configured

Check whether the Pods were created successfully

root@k8-deploy:~/k8s-yaml/ceph-case# kubectl get pod
NAME                                READY   STATUS    RESTARTS   AGE
nginx-deployment-558854958b-25g5q   1/1     Running   0          31m
nginx-deployment-558854958b-2zt9v   1/1     Running   0          31m
nginx-deployment-558854958b-7w725   1/1     Running   0          41m
nginx-deployment-558854958b-8kcmx   1/1     Running   0          31m
nginx-deployment-558854958b-96lvz   1/1     Running   0          31m
nginx-deployment-558854958b-ch6sd   1/1     Running   0          31m
nginx-deployment-558854958b-mp2ck   1/1     Running   0          31m
nginx-deployment-558854958b-qq8zz   1/1     Running   0          31m
nginx-deployment-558854958b-sbngv   1/1     Running   0          31m

Check inside a Pod whether CephFS is mounted

root@k8-deploy:~/k8s-yaml/ceph-case# kubectl exec nginx-deployment-558854958b-25g5q -- df -h |grep html
192.168.2.21:6789,192.168.2.22:6789,192.168.2.23:6789:/  622G     0  622G   0% /usr/share/nginx/html

On a Ceph client host, manually mount CephFS to a directory with a different account and create an nginx index.html there for testing.

root@client1:/mnt/ceph-fs-t1# mount -t ceph mon1:6789,mon2:6789,mon3:6789:/ /mnt/ceph-fs-t1/ -o name=cephfs_user02,secretfile=/etc/ceph/cephfs_user02.key
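
The secretfile referenced above contains only the raw key of the mounting user; a sketch of how it could have been generated on the deploy node (cephfs_user02 is the account named in the mount options and is assumed to exist, created as in the referenced post):

ceph auth get-key client.cephfs_user02 > /etc/ceph/cephfs_user02.key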

root@client1:/mnt/ceph-fs-t1# echo cephfs nginx html > index.html
root@client1:/mnt/ceph-fs-t1# cat index.html
cephfs nginx html

root@client1:/mnt/ceph-fs-t1# vim index.html 
cephfs nginx html
cephfs nginx html 222 222

Test the k8s nginx Service; the index.html returned is exactly the one created from the Ceph client.

root@k8-deploy:~/k8s-yaml/ceph-case# curl 192.168.2.17:30081
cephfs nginx html
root@k8-deploy:~/k8s-yaml/ceph-case# curl 192.168.2.17:30081
cephfs nginx html
cephfs nginx html 222 222
更多推荐