k8s 对接ceph并使用StorageClass动态创建pvc

前提条件,已经安装ceph和k8s,具体环境信息如下:

一. 服务器环境

  • 操作系统:centos 7.6
  • ceph版本:14.2.10(nautilus)
  • k8s版本:v1.18.0
  • arch: ARM64
  • GO版本:go1.14.6
  • docker版本:19.03.12

在k8s中使用volume主要有两种使用方式,一种是静态方法,一种是动态方法,本文主要介绍动态方法。k8s集群默认不支持动态创建pvc,官方推出了external-storage插件,我这里的k8s环境是通过容器运行的,所以本教程也主要介绍通过容器使用ceph-provisioner

二. 使用cephfs

1.创建ceph-secret
[root@k8s-master cephfs]# ceph auth get-key client.admin|base64
QVF***************************************9PQ==
[root@k8s-master cephfs]# cat ceph-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret
  namespace: cephfs
data:
  key: QVF***************************************9PQ==

[root@k8s-master cephfs]# kubectl create -f ceph-secret.yaml

ceph-secret中存储的ceph admin用户的认证信息必须使用base64编码(注意:base64只是编码,并非加密)

2.创建使用cephfs-provisioner需要的相关rbac信息
2.1 创建cephfs命名空间
kubectl create ns cephfs
2.2 创建rbac
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cephfs-provisioner
  namespace: cephfs
  
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  # ClusterRole 是集群级资源,metadata 中不应指定 namespace
  name: cephfs-provisioner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["kube-dns","coredns"]
    verbs: ["list", "get"]
    
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: cephfs-provisioner
  namespace: cephfs
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["create", "get", "delete"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cephfs-provisioner
subjects:
  - kind: ServiceAccount
    name: cephfs-provisioner
    namespace: cephfs
roleRef:
  kind: ClusterRole
  name: cephfs-provisioner
  apiGroup: rbac.authorization.k8s.io

---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: cephfs-provisioner
  namespace: cephfs
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: cephfs-provisioner
subjects:
- kind: ServiceAccount
  name: cephfs-provisioner
  # ServiceAccount 类型的 subject 必须指定 namespace,否则创建会报错
  namespace: cephfs
3.制作cephfs-provisioner镜像
3.1 下载external-storage源码
git clone https://github.com/kubernetes-retired/external-storage.git -b master

需要将源码下载到GOPATH/src进行编译

3.2 编译源码

我这里是在ARM64架构下运行,所以编译的目标架构需指定为arm64,修改Makefile中的如下内容

cd $GOPATH/src/external-storage/ceph/cephfs
# vim Makefile
# REGISTRY 为个人镜像仓库地址
# 注意:Makefile 变量赋值行尾不要跟行内注释,# 前的空格会被并入变量值导致镜像名错误
ifeq ($(REGISTRY),)
        REGISTRY = gebilaoyao/external_storage/
endif

......

all build:
        CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -a -ldflags '-extldflags "-static"' -o cephfs-provisioner cephfs-provisioner.go

执行编译命令:

make   # 编译生成二进制文件cephfs-provisioner
3.3 编译生成镜像

修改Dockerfile内容

vim  Dockerfile

FROM centos:7

# 修改ceph版本,根据环境自行修改
# 注意:Dockerfile 指令行尾不支持行内注释,# 之后的内容会被当作指令参数的一部分
ENV CEPH_VERSION "nautilus"
# 需先下载 RPM-GPG-KEY-CentOS-7 文件到构建目录
COPY RPM-GPG-KEY-CentOS-7 /tmp/RPM-GPG-KEY-CentOS-7
RUN rpm --import 'https://download.ceph.com/keys/release.asc' && \
  rpm --import /tmp/RPM-GPG-KEY-CentOS-7 && \
  rpm -Uvh https://download.ceph.com/rpm-$CEPH_VERSION/el7/noarch/ceph-release-1-1.el7.noarch.rpm && \
  yum install -y epel-release --nogpgcheck && \
  yum install -y ceph-common python-cephfs && \
  yum clean all

COPY cephfs-provisioner /usr/local/bin/cephfs-provisioner
COPY cephfs_provisioner/cephfs_provisioner.py /usr/local/bin/cephfs_provisioner
RUN chmod -v o+x /usr/local/bin/cephfs_provisioner

执行构建镜像命令make quick-container,下载安装包过程较慢,请耐心等待镜像制作过程

4. 容器运行cephfs-provisioner插件
[root@k8s-master cephfs]# cat cephfs-provisioner.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cephfs-provisioner
  namespace: cephfs
  labels:
    app: cephfs-provisioner
spec:
  replicas: 1
  selector:
    matchLabels:
      app: cephfs-provisioner
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: cephfs-provisioner
    spec:
      containers:
      - name: cephfs-provisioner
        image: "gebilaoyao/ceph-fs-provisioner:latest"  # 镜像为上一步编译完成的镜像,可使用docker image ls进行查看
        resources:
          limits:
            cpu: 500m
            memory: 512Mi
          requests:
            cpu: 100m
            memory: 64Mi
        env:
        - name: PROVISIONER_NAME                # 与storageclass的provisioner参数相同
          value: ceph.com/cephfs
        - name: PROVISIONER_SECRET_NAMESPACE    # 与rbac的namespace相同
          value: cephfs
        command:
        - "/usr/local/bin/cephfs-provisioner"
        args:
        - "-id=cephfs-provisioner-1"
      serviceAccount: cephfs-provisioner
      
[root@k8s-master cephfs]# kubectl create -f cephfs-provisioner.yaml

等待 cephfs-provisioner pod 创建完成。

5. 创建storageClass验证
[root@k8s-master cephfs]# cat storage-class.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
   name: cephfs-provisioner-sc
provisioner: ceph.com/cephfs
volumeBindingMode: WaitForFirstConsumer
parameters:
  monitors: 10.0.12.95:6789  # cephfs-server-ip
  adminId: admin
  adminSecretName: ceph-secret
  adminSecretNamespace: "cephfs"
  claimRoot: /pvc-volumes
[root@k8s-master cephfs]# cat cephfs-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: cephfs-pvc
  namespace: cephfs
  annotations:
    volume.beta.kubernetes.io/storage-class: "cephfs-provisioner-sc"
spec:
  # storageClassName: cephfs-provisioner-sc
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 100Mi

创建storageClass:

[root@k8s-master cephfs]# kubectl create -f storage-class.yaml

创建pvc:

[root@k8s-master cephfs]# kubectl create -f cephfs-pvc.yaml

查看pvc状态:

[root@k8s-master cephrbd]# kubectl get pvc -A
NAMESPACE   NAME                     STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS            AGE
cephfs      cephfs-pvc               Bound    pvc-a620785c-1f01-4fbc-84f4-7deb5bef067a   100Mi      RWX            cephfs-provisioner-sc   19h

可以看到创建的pvc已经自动通过storageClass创建pv进行绑定,处于Bound状态

三. 使用cephrbd

1.创建ceph-secret
[root@k8s-master cephrbd]# ceph auth get-key client.admin|base64
QVF***************************************9PQ==
[root@k8s-master cephrbd]# cat ceph-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret
  namespace: default
# kubelet 挂载 rbd 卷时要求 user secret 的类型为 kubernetes.io/rbd
type: kubernetes.io/rbd
data:
  key: QVF***************************************9PQ==

[root@k8s-master cephfs]# kubectl create -f ceph-secret.yaml
2.2 创建rbac
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["kube-dns","coredns"]
    verbs: ["list", "get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
subjects:
  - kind: ServiceAccount
    name: rbd-provisioner
    namespace: default
roleRef:
  kind: ClusterRole
  name: rbd-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: rbd-provisioner
rules:
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get"]
- apiGroups: [""]
  resources: ["endpoints"]
  verbs: ["get", "list", "watch", "create", "update", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: rbd-provisioner
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rbd-provisioner
subjects:
  - kind: ServiceAccount
    name: rbd-provisioner
    namespace: default
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rbd-provisioner
3.制作cephrbd-provisioner镜像
3.1 下载external-storage源码
git clone https://github.com/kubernetes-retired/external-storage.git -b master

需要将源码下载到GOPATH/src进行编译

3.2 编译源码

在rbd的编译过程中,需要下载相关依赖包,可以在$GOPATH/src/external-storage/ceph/rbd目录下,执行以下命令:

go get ./...

其中会有一个github.com/kubernetes-incubator/external-storage/ceph/rbd/pkg/provision的依赖包无法下载,这个依赖已经移入到external-storage/ceph/rbd/pkg/provision/下,可修改源码后再进行编译,将这个依赖包名修改为:

external-storage/ceph/rbd/pkg/provision

我这里是在ARM64架构下运行,所以编译的目标架构需指定为arm64,修改Makefile中的如下内容,

[root@k8s-master rbd]# cd $GOPATH/src/external-storage/ceph/rbd
[root@k8s-master rbd]# cat Makefile
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

ifeq ($(REGISTRY),)
        REGISTRY = gebilaoyao/cephrbd-provisioner/
endif

ifeq ($(VERSION),)
        VERSION = latest
endif

IMAGE = $(REGISTRY)rbd-provisioner:$(VERSION)
MUTABLE_IMAGE = $(REGISTRY)rbd-provisioner:latest

all build:
        CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -a -ldflags '-extldflags "-static"' -o rbd-provisioner ./cmd/rbd-provisioner
.PHONY: all build

container: build quick-container
.PHONY: container

quick-container:
        docker build -t $(MUTABLE_IMAGE) .
        docker tag $(MUTABLE_IMAGE) $(IMAGE)
.PHONY: quick-container

push: container
        docker push $(IMAGE)
        docker push $(MUTABLE_IMAGE)
.PHONY: push

test:
        go test ./...
.PHONY: test

clean:
        rm -f rbd-provisioner
.PHONY: clean

执行编译命令:

make   # 编译生成二进制文件cephrbd-provisioner
3.3 编译生成镜像

修改Dockerfile内容

[root@k8s-master rbd]# cat Dockerfile
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM centos:7

# ceph版本(注意:Dockerfile 指令行尾不支持行内注释,故将说明移至单独一行)
ENV CEPH_VERSION "nautilus"
RUN rpm -Uvh https://download.ceph.com/rpm-$CEPH_VERSION/el7/noarch/ceph-release-1-1.el7.noarch.rpm && \
  yum install -y epel-release && \
  yum install -y --nogpgcheck ceph-common && \
  yum clean all

COPY rbd-provisioner /usr/local/bin/rbd-provisioner
ENTRYPOINT ["/usr/local/bin/rbd-provisioner"]

执行构建镜像命令make quick-container,下载安装包过程较慢,请耐心等待镜像制作过程

4. 容器运行cephrbd-provisioner插件
[root@k8s-master cephrbd]# cat cephfs-provisioner.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rbd-provisioner
spec:
  replicas: 1
  strategy:
    type: Recreate

  selector:
    matchLabels:
      app: rbd-provisioner
  template:
    metadata:
      labels:
        app: rbd-provisioner
    spec:
      containers:
      - name: rbd-provisioner
        image: gebilaoyao/cephrbd-provisioner:latest  # 上一步构建出的镜像
        env:
        - name: PROVISIONER_NAME
          value: ceph.com/rbd
      serviceAccount: rbd-provisioner
      
[root@k8s-master cephfs]# kubectl create -f cephfs-provisioner.yaml

等待 rbd-provisioner pod 创建完成。

5. 创建storageClass验证
[root@k8s-master cephrbd]# cat storage-class.yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  # StorageClass 是集群级资源,metadata 中不应指定 namespace
  name: cephrbd-sc
  annotations:
    storageclass.kubernetes.io/is-default-class: "false"
provisioner: ceph.com/rbd
reclaimPolicy: Retain
parameters:
  monitors: 10.0.12.95:6789   # ceph-server-ip
  adminId: admin
  adminSecretName: ceph-secret   # 与步骤1中创建的secret名称保持一致
  adminSecretNamespace: default
  pool: cephrbd
  fsType: xfs
  userId: admin
  userSecretName: ceph-secret    # 该secret需存在于pvc所在namespace且类型为kubernetes.io/rbd
  imageFormat: "2"
[root@k8s-master cephrbd]# cat test-pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: ceph-pvc
  namespace: default
spec:
  storageClassName: cephrbd-sc
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi

创建storageClass:

[root@k8s-master cephrbd]# kubectl create -f storage-class.yaml

创建pvc:

[root@k8s-master cephrbd]# kubectl create -f test-pvc.yaml

查看pvc状态:

[root@k8s-master cephrbd]# kubectl get pvc -A
NAMESPACE   NAME                     STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS            AGE
default     ceph-pvc                 Bound    pvc-0f4f0ab3-e31a-4ab2-bf92-37d34391ca41   1Gi        RWO            cephrbd-sc              18h

可以看到创建的pvc已经自动通过storageClass创建pv进行绑定,处于Bound状态

Logo

K8S/Kubernetes社区为您提供最前沿的新闻资讯和知识内容

更多推荐