部署gluster
部署环境
vmware15

ubuntu16.04    192.168.245.133  k8s-master gluster heketi
ubuntu16.04    192.168.245.134  k8s-node   gluster 
ubuntu16.04    192.168.245.135  k8s-node   gluster 

添加硬盘
添加额外存储
在vmware中选中虚拟机->编辑虚拟机设置->列表中选中硬盘->添加

启动虚拟机后查看硬盘,多了一个5G的/dev/sdb盘

root@ubuntu:/usr/local/heketi/heketi# fdisk -l
Disk /dev/sdb: 5 GiB, 5368709120 bytes, 10485760 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes

修改磁盘类型,选择gpt

  root@ubuntu:/usr/local/heketi/heketi# fdisk /dev/sdb

    Welcome to fdisk (util-linux 2.27.1).
    Changes will remain in memory only, until you decide to write them.
    Be careful before using the write command.

    Command (m for help):#依次输入g->w保存退出

查看/dev/sdb盘信息,发现磁盘类型变为了gpt

root@ubuntu:/usr/local/heketi/heketi# fdisk -l
Disk /dev/sdb: 5 GiB, 5368709120 bytes, 10485760 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disklabel type: gpt
Disk identifier: D2003869-2A4C-47F4-B600-74E60998A0A1

为磁盘/dev/sdb分区,我们这里只分一个区

root@ubuntu:/usr/local/heketi/heketi# fdisk /dev/sdb
Welcome to fdisk (util-linux 2.27.1).
Changes will remain in memory only, until you decide to write them.
Be careful before using the write command.
Command (m for help): #依次输入n->一直按回车->w保存退出

查看分区信息,多了一个/dev/sdb1分区(后续heketi会将其初始化为LVM物理卷)

root@ubuntu:/usr/local/heketi/heketi# fdisk -l
Disk /dev/sdb: 5 GiB, 5368709120 bytes, 10485760 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disklabel type: gpt
Disk identifier: D2003869-2A4C-47F4-B600-74E60998A0A1

Device     Start      End  Sectors Size Type
/dev/sdb1   2048 10485726 10483679   5G Linux filesystem

gluster部署
所有节点安装gluster

apt-get install glusterfs-server glusterfs-common glusterfs-client fuse
systemctl start glusterfs-server
systemctl enable glusterfs-server

在133节点上添加节点

gluster peer probe 192.168.245.134
gluster peer probe 192.168.245.135

不用添加本地节点,对本地节点执行probe时会提示无需添加(并非报错)

root@ubuntu:/usr/local/heketi/heketi# gluster peer probe 192.168.245.133
peer probe: success. Probe on localhost not needed

heketi部署
下载heketi包

   wget https://github.com/heketi/heketi/releases/download/v5.0.1/heketi-client-v5.0.1.linux.amd64.tar.gz
    wget https://github.com/heketi/heketi/releases/download/v5.0.1/heketi-v5.0.1.linux.amd64.tar.gz

解压

tar -zxvf heketi-client-v5.0.1.linux.amd64.tar.gz
tar -zxvf heketi-v5.0.1.linux.amd64.tar.gz

修改配置文件heketi.json

 "_port_comment": "Heketi Server Port Number",
    "port": "18080",#修改端口

    "_use_auth": "Enable JWT authorization. Please enable for deployment",
    "use_auth": true,#修改auth

    "_jwt": "Private keys for access",
    "jwt": {
        "_admin": "Admin has access to all APIs",
        "admin": {
          "key": "adminkey" #修改admin登录密码
        },
        "_user": "User only has access to /volumes endpoint",
        "user": {
          "key": "My Secret"
        }
    },
    ---
    "executor": "ssh",# 修改登录方式

    "_sshexec_comment": "SSH username and private key file information",
    "sshexec": {
      "keyfile": "/root/.ssh/id_rsa", #密钥文件路径
      "user": "root",# 登录用户
      "port": "22",# 端口
      "fstab": "/etc/fstab"#  /etc/fstab文件负责配置Linux开机时自动挂载的分区
    },

配置节点间的免密登录
ssh-keygen #生成密钥对,然后将各节点的公钥互相copy到对方的authorized_keys文件中即可
向heketi中添加cluster、device
编辑配置文件topology-sample.json

{
    "clusters": [
        {
            "nodes": [
                {
                    "node": {
                        "hostnames": {
                            "manage": [
                                "192.168.245.133"
                            ],
                            "storage": [
                                "192.168.245.133"
                            ]
                        },
                        "zone": 1
                    },
                    "devices": [
                        "/dev/sdb1"
                    ]
                },
                {
                    "node": {
                        "hostnames": {
                            "manage": [
                                "192.168.245.134"
                            ],
                            "storage": [
                                "192.168.245.134"
                            ]
                        },
                        "zone": 1
                    },
                    "devices": [
                        "/dev/sdb1"
                    ]
                },
                {
                    "node": {
                        "hostnames": {
                            "manage": [
                                "192.168.245.135"
                            ],
                            "storage": [
                                "192.168.245.135"
                            ]
                        },
                        "zone": 1
                    },
                    "devices": [
                        "/dev/sdb1"
                    ]
                }             
            ]
        }
    ]
}

执行命令

root@ubuntu:/usr/local/heketi/heketi# ./heketi-cli --server "http://192.168.245.133:18080" --user "admin" --secret "adminkey" topology load --json topology-sample.json
Creating node 192.168.245.133 ... ID: 1204399251b630ccd34cd25c7c93a597
    Adding device /dev/sdb1
Creating node 192.168.245.134 ... ID: a77370a6e9d5ee52de9cb71a27475d4a
    Adding device /dev/sdb1 ... OK
Creating node 192.168.245.135 ... ID: 4668bb56bee757caaf6032638b9b8269
    Adding device /dev/sdb1

查看cluster

   root@ubuntu:/usr/local/heketi/heketi# ./heketi-cli --server "http://192.168.245.133:18080" --user "admin" --secret "adminkey" cluster list
    Clusters:
    Id:baf191bf97b91c6a8b8fad77e99c4ffc

查看node

   root@ubuntu:/usr/local/heketi/heketi# ./heketi-cli --server "http://192.168.245.133:18080" --user "admin" --secret "adminkey" node list
    Id:1204399251b630ccd34cd25c7c93a597 Cluster:baf191bf97b91c6a8b8fad77e99c4ffc
    Id:4668bb56bee757caaf6032638b9b8269 Cluster:baf191bf97b91c6a8b8fad77e99c4ffc
    Id:a77370a6e9d5ee52de9cb71a27475d4a Cluster:baf191bf97b91c6a8b8fad77e99c4ffc

k8s使用gluster

创建storageclass,storageclass-glusterfs.yaml
    apiVersion: storage.k8s.io/v1beta1 
    kind: StorageClass
    metadata:
      name: glusterfs
    provisioner: kubernetes.io/glusterfs #表示存储分配器,需要根据后端存储的不同而变更
    parameters:
      resturl: "http://192.168.245.133:18080" #heketi API服务提供的url
      restauthenabled: "true" #可选参数,默认值为”false”,heketi服务开启认证时必须设置为”true”
      restuser: "admin" #可选参数,开启认证时设置相应用户名;
      restuserkey: "adminkey" #可选参数,开启认证时设置相应密码;
      volumetype: "replicate:2" #可选参数,设置卷类型及其参数,如果未分配卷类型,则有分配器决定卷类型;如”volumetype: replicate:3”表示3副本的replicate卷,”volumetype: disperse:4:2”表示disperse卷,其中‘4’是数据,’2’是冗余校验,”volumetype: none”表示distribute卷

执行命令创建

kubectl apply -f storageclass-glusterfs.yaml

查看storageclass

    root@ubuntu:/usr/local/heketi/heketi# kubectl get storageclass
    NAME        PROVISIONER               AGE
    glusterfs   kubernetes.io/glusterfs   70m
创建pvc,glusterfs-pvc.yaml
    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: glusterfs-test
      namespace: default
      annotations:
        volume.beta.kubernetes.io/storage-class: "glusterfs"
    spec:
      accessModes:
      - ReadWriteMany
      resources:
        requests:
          storage: 1Gi

执行命令创建

root@ubuntu:/usr/local/heketi/heketi# kubectl create -f glusterfs-pvc.yaml 
persistentvolumeclaim/glusterfs-test created

查看pvc
状态为Bound说明创建成功

root@ubuntu:/usr/local/heketi/heketi# kubectl get pvc
NAME             STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
glusterfs-test   Bound    pvc-f705938e-1bf9-11ea-b5d9-000c29509b70   1Gi        RWX            glusterfs      12s

查看pv
这里pv为动态创建的

root@ubuntu:/usr/local/heketi/heketi# kubectl get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                    STORAGECLASS   REASON   AGE
pvc-f705938e-1bf9-11ea-b5d9-000c29509b70   1Gi        RWX            Delete           Bound    default/glusterfs-test   glusterfs               81s

问题
heketi有些卷明明存在但是却删不了
直接删除heketi存储目录/var/lib/heketi/ 下的mounts/文件夹,然后> heketi.db 清空db文件,重新来
Can't initialize physical volume "/dev/sdb1" of volume group "vg1" without --ff
这是因为没有卸载之前的vg和pv
使用命令vgremove、pvremove依次删除卷组(VG)、物理卷(PV)即可

Logo

K8S/Kubernetes社区为您提供最前沿的新闻资讯和知识内容

更多推荐