Using a StorageClass for Persistent Storage (Ceph RBD mount)

1. Prepare the Ceph cluster and create the Kubernetes Secrets (used in Step 3)


# Install ceph-common on every node of the k8s cluster: kubelet needs the rbd command to map the RBD images onto the node
yum install -y ceph-common
 
# Create the OSD pool, on a Ceph mon or admin node
[root@bd-server-2 ~]# ceph osd pool create kube 128
pool 'kube' created
[root@bd-server-2 ~]# rbd pool init kube
[root@bd-server-2 ~]# ceph osd pool application enable kube rbd   # enable the kube pool for RBD
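
# Optional sanity check: confirm the pool exists and carries the rbd application tag
ceph osd pool ls detail | grep kube
ceph osd pool application get kube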

# Create the user that k8s will use to access Ceph, on a Ceph mon or admin node
ceph auth get-or-create client.kube mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=kube' -o ceph.client.kube.keyring
 
# Print the keys, on a Ceph mon or admin node
ceph auth get-key client.admin
ceph auth get-key client.kube
 
# Create the admin secret
# Replace CEPH_ADMIN_SECRET with the key returned for client.admin
export CEPH_ADMIN_SECRET='AQBBAnRbSiSOFxAAEZXNMzYV6hsceccYLhzdWw=='
kubectl create secret generic ceph-secret --type="kubernetes.io/rbd" \
--from-literal=key=$CEPH_ADMIN_SECRET \
--namespace=kube-system
 
# Create the secret used by PVCs in the dev namespace to access Ceph
# Replace CEPH_KUBE_SECRET with the key returned for client.kube
export CEPH_KUBE_SECRET='AQBZK3VbTN/QOBAAIYi6CRLQcVevW5HM8lunOg=='
kubectl create secret generic ceph-user-secret --type="kubernetes.io/rbd" \
--from-literal=key=$CEPH_KUBE_SECRET \
--namespace=dev   # must be in the same namespace as the PVC
 
# Check the secrets
kubectl get secret ceph-user-secret -o yaml -n dev
kubectl get secret ceph-secret -o yaml -n kube-system
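
If you want to double-check that the stored keys match what ceph auth get-key printed, you can decode them (a quick optional check):

kubectl get secret ceph-user-secret -n dev -o jsonpath='{.data.key}' | base64 -d
kubectl get secret ceph-secret -n kube-system -o jsonpath='{.data.key}' | base64 -d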

2. Enable RBAC permissions

RBAC stands for Role-Based Access Control.
Create the ServiceAccount and its roles from the rbac.yaml file below.

apiVersion: v1
kind: ServiceAccount  # service account the ceph rbd-provisioner runs as; the roles below grant its permissions in the cluster
metadata:
  name: rbd-provisioner
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
rules:
 - apiGroups: [""]
   resources: ["persistentvolumes"]
   verbs: ["get", "list", "watch", "create", "delete"]
 - apiGroups: [""]
   resources: ["persistentvolumeclaims"]
   verbs: ["get", "list", "watch", "update"]
 - apiGroups: ["storage.k8s.io"]
   resources: ["storageclasses"]
   verbs: ["get", "list", "watch"]
 - apiGroups: [""]
   resources: ["events"]
   verbs: ["create", "update", "patch"]
 - apiGroups: [""]
   resources: ["endpoints"]
   verbs: ["get", "list", "watch", "create", "update", "patch"]
 - apiGroups: [""]
   resources: ["services"]
   resourceNames: ["kube-dns"]
   verbs: ["list", "get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
subjects:
- kind: ServiceAccount
  name: rbd-provisioner
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: rbd-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: rbd-provisioner
  namespace: kube-system
rules:
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: rbd-provisioner
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rbd-provisioner
subjects:
- kind: ServiceAccount
  name: rbd-provisioner
  namespace: kube-system

This file creates the authorized account. Why do we need authorization? The rbd-provisioner is not a built-in controller: it runs as an ordinary Pod, and a Pod's service account has almost no API permissions by default. To provision volumes it has to watch PersistentVolumeClaims, create and delete PersistentVolumes, read the Ceph secrets, and record events, which is exactly what the ClusterRole and Role above grant to the rbd-provisioner ServiceAccount.

Apply it on the master node:

[root@k8s-master1 static]# kubectl apply -f rbac.yaml 
serviceaccount/rbd-provisioner created
clusterrole.rbac.authorization.k8s.io/rbd-provisioner created
clusterrolebinding.rbac.authorization.k8s.io/rbd-provisioner created
role.rbac.authorization.k8s.io/rbd-provisioner created
rolebinding.rbac.authorization.k8s.io/rbd-provisioner created
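
To confirm the bindings took effect, you can impersonate the service account (a quick optional check, using the names created above):

kubectl auth can-i create persistentvolumes --as=system:serviceaccount:kube-system:rbd-provisioner
kubectl auth can-i get secrets -n kube-system --as=system:serviceaccount:kube-system:rbd-provisioner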

3. Create the StorageClass and specify the provisioner

Download link for the ceph rbd-provisioner image package:
https://pan.baidu.com/s/1iSdea0TpdlyPUAYoaqjgsQ?pwd=u50u
Extraction code: u50u
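
If your nodes cannot pull from quay.io directly, you can load the downloaded image package onto each node. The tarball name below is only illustrative of what the download might contain:

# On each k8s node (Docker runtime); the file name is an example
docker load -i rbd-provisioner.tar
# Or, with containerd as the runtime:
ctr -n k8s.io images import rbd-provisioner.tar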

Create the ceph-sc-provisioner.yaml file:

[root@k8s-master1 static]# cat ceph-sc-provisioner.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rbd-provisioner
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: rbd-provisioner
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: rbd-provisioner
    spec:
      containers:
      - name: rbd-provisioner
        image: "quay.io/external_storage/rbd-provisioner:latest"
        env:
        - name: PROVISIONER_NAME
          value: ceph.com/rbd
      serviceAccountName: rbd-provisioner


---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-rbd
provisioner: ceph.com/rbd
parameters:
  monitors: 10.0.19.127:6789,10.0.19.129:6789,10.0.19.130:6789
  adminId: admin   # Ceph admin user used to create images
  adminSecretName: ceph-secret        # name of the admin secret
  adminSecretNamespace: kube-system   # namespace of the admin secret
  pool: kube      # Ceph RBD pool
  userId: kube     # Ceph user k8s maps and mounts with
  userSecretName: ceph-user-secret    # user secret name; it is looked up in the PVC's namespace, so no namespace is given here
  fsType: ext4
  imageFormat: "2"
  imageFeatures: "layering"
reclaimPolicy: Retain

PS: the rbd-provisioner image is an external provisioner. It watches for PVCs that reference this StorageClass, uses the admin credentials above to create an RBD image in the Ceph pool, and creates the matching PV; it registers itself under the provisioner name ceph.com/rbd, which is what the StorageClass's provisioner field points to.

Apply it on the master node:

[root@k8s-master1 static]# kubectl apply -f ceph-sc-provisioner.yaml 
deployment.apps/rbd-provisioner created
storageclass.storage.k8s.io/ceph-rbd created
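
Before moving on, it is worth checking that the provisioner Pod is actually running; its logs will show each provisioning request later on:

kubectl get pods -n kube-system -l app=rbd-provisioner
kubectl logs -n kube-system deploy/rbd-provisioner
kubectl get storageclass ceph-rbd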

4. Create a PVC backed by the StorageClass and see what the dynamically provisioned PV looks like:

[root@k8s-master1 ~]# cat pvc-sc.yaml 
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: ceph-sc-claim
  namespace: dev
spec:
  storageClassName: ceph-rbd
  accessModes:
    - ReadWriteOnce
    - ReadOnlyMany
  resources:
    requests:
      storage: 500Mi
[root@k8s-master1 ~]# kubectl apply -f pvc-sc.yaml 
persistentvolumeclaim/ceph-sc-claim created
[root@k8s-master1 static]# kubectl get pvc -n dev
NAME            STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
ceph-sc-claim   Bound    pvc-751e4b5d-7a29-4da8-ba40-b7dfb6e86320   500Mi      RWO,ROX        ceph-rbd       9m40s
[root@k8s-master1 static]# kubectl get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM               STORAGECLASS   REASON   AGE
pvc-751e4b5d-7a29-4da8-ba40-b7dfb6e86320   500Mi      RWO,ROX        Delete           Bound    dev/ceph-sc-claim   ceph-rbd                5m58s
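
On the Ceph side you can confirm that an RBD image was created in the kube pool for this PV. The image name is generated by the provisioner, so it will differ in your cluster:

# Run on a Ceph mon or admin node
rbd ls kube
rbd info kube/<image-name-from-the-listing>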

5. Create a Pod to test

Create a Pod that claims the PVC for testing.

[root@k8s-master1 ~]# cat nginx.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx
  name: nginx
  namespace: dev
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - image: nginx
        name: nginx
        volumeMounts:    # mount the PVC at nginx's default web root
          - name: html-files
            mountPath: "/usr/share/nginx/html"
      volumes:
        - name: html-files
          persistentVolumeClaim:
            claimName: ceph-sc-claim  # name of the PVC
            

To clean up the PVCs from the earlier experiments: because a bound PV cannot be removed while its PVC still exists, delete the PVCs first (for example kubectl delete pvc --all) and then the PVs.
If a Pod is still using a PVC, the order is: delete the Pod, then the PVC, then the PV; and if you also want to reclaim space on the backend, delete the data on the Ceph side last.
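
A minimal cleanup sketch following that order (the resource names are the ones used in this article; adjust them to yours):

kubectl delete -f nginx.yaml                 # 1. the Deployment/Pod using the PVC
kubectl delete pvc ceph-sc-claim -n dev      # 2. the PVC
kubectl get pv                               # 3. check whether the PV was released or removed
# If the PV's reclaim policy is Retain, remove the PV and the backend RBD image yourself:
# kubectl delete pv <pv-name>
# rbd rm kube/<image-name>                   # on a Ceph mon or admin node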
Test it by applying the manifest on the master node:

[root@k8s-master1 ~]# kubectl apply -f nginx.yaml 
deployment.apps/nginx unchanged
[root@k8s-master1 static]# kubectl get pod -o wide -n dev
NAME                     READY   STATUS    RESTARTS   AGE    IP             NODE        NOMINATED NODE   READINESS GATES
nginx-5767cf6c4d-9vqt8   1/1     Running   0          112s   10.244.0.239   k8s-node1   <none>           <none>
[root@k8s-node1 ~]# lsblk -l|grep rbd0
rbd0             252:0    0  500M  0 disk /var/lib/kubelet/pods/45ac8ba0-5cc3-4c34-99c6
[root@k8s-node1 ~]# echo 'hellow'>/var/lib/kubelet/pods/45ac8ba0-5cc3-4c34-99c6-2e2abb4a1a33/volumes/kubernetes.io~rbd/pvc-828c6a80-9b81-4c49-9ebb-5167b7653c99/index.html
[root@k8s-node1 ~]# curl 10.244.0.239
hellow
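
You can also verify the mount from inside the container instead of on the node (optional):

kubectl exec -n dev deploy/nginx -- df -h /usr/share/nginx/html
kubectl exec -n dev deploy/nginx -- cat /usr/share/nginx/html/index.html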