k8s + heketi + glusterfs dynamic storage management and GlusterFS cluster expansion: a record of the pitfalls along the way
1. Versions
Component | Version
---|---
k8s | 1.22.3
heketi | 10.4
heketi-cli | 10.4
glusterfs | 3.12.14
The heketi and heketi-cli versions must match!
You need at least three GlusterFS nodes (the default durability is replica 3).
2. Preparation
[root@hadoop03 kubernetes]# pwd
/opt/module/heketi-client/share/heketi/kubernetes
[root@hadoop03 kubernetes]# ll
total 176
-rw-r--r-- 1 root root 5352 Jan 11 23:10 glusterfs-daemonset.json
-rw-r--r-- 1 root root 3695 Jan 12 09:36 heketi-bootstrap.json
-rw-r--r-- 1 root root 4252 Jan 12 10:06 heketi-deployment.json
-rw-r--r-- 1 root root 1109 Aug 31 21:21 heketi.json
-rw-r--r-- 1 root root 111 Aug 31 21:21 heketi-service-account.json
-rw-r--r-- 1 root root 68706 Jan 12 09:58 heketi-storage-bak.json
-rw-r--r-- 1 root root 68717 Jan 12 09:59 heketi-storage.json
-rw-r--r-- 1 root root 317 Aug 31 21:21 README.md
-rw-r--r-- 1 root root 1251 Jan 11 23:15 topology.json
-rw-r--r-- 1 root root 1251 Jan 11 23:15 topology-sample.json
[root@hadoop03 kubernetes]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
hadoop03:5000/gluster-centos gluster3u12_centos7 e67c55cd4ba0 3 years ago 357MB
# this image is heketi 10.4
hadoop03:5000/heketi latest da6c60273963 4 months ago 515MB
On all nodes:
[root@hadoop03 kubernetes]# modprobe dm_snapshot
[root@hadoop03 kubernetes]# modprobe dm_mirror
[root@hadoop03 kubernetes]# modprobe dm_thin_pool
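To keep these modules loaded after a reboot, a small sketch (assuming a systemd-based distro that reads /etc/modules-load.d):
# load the device-mapper modules at boot
cat > /etc/modules-load.d/glusterfs.conf <<EOF
dm_snapshot
dm_mirror
dm_thin_pool
EOF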
3. Heketi
[root@hadoop03 kubernetes]# kubectl create -f heketi-service-account.json
[root@hadoop03 kubernetes]# kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account
[root@hadoop03 kubernetes]# kubectl create secret generic heketi-config-secret --from-file=./heketi.json
[root@hadoop03 kubernetes]# kubectl create -f heketi-bootstrap.json
secret/heketi-db-backup created
service/heketi created
deployment.extensions/heketi created
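A quick sanity check before moving on (the deploy-heketi service name is taken from the transcripts later in this post; adjust to your manifests):
kubectl get pod | grep deploy-heketi   # wait until it is 1/1 Running
HEKETI_IP=$(kubectl get svc deploy-heketi -o jsonpath='{.spec.clusterIP}')
curl http://$HEKETI_IP:8080/hello      # heketi answers a plain /hello probe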
4. GlusterFS
Add the label storagenode=glusterfs to each k8s storage node.
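A sketch with this cluster's node names:
kubectl label node hadoop01 storagenode=glusterfs
kubectl label node hadoop02 storagenode=glusterfs
kubectl label node hadoop03 storagenode=glusterfs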
[root@hadoop03 kubernetes]# kubectl get nodes -l storagenode=glusterfs
NAME STATUS ROLES AGE VERSION
hadoop01 Ready <none> 19h v1.22.3
hadoop02 Ready <none> 2d20h v1.22.3
hadoop03 Ready control-plane,master 2d20h v1.22.3
# change the image to hadoop03:5000/gluster-centos:gluster3u12_centos7
[root@hadoop03 log]# kubectl apply -f glusterfs-daemonset.json
[root@hadoop03 log]# kubectl get pods -l glusterfs-node=daemonset
NAME READY STATUS RESTARTS AGE
glusterfs-cw8jv 1/1 Running 1 (95m ago) 12h
glusterfs-lsgkw 1/1 Running 2 (17m ago) 12h
glusterfs-phs69 1/1 Running 0 12h
5. heketi-cli
[root@hadoop03 kubernetes]# heketi-cli --version
heketi-cli v10.4.0-release-10
5.1 Edit the topology
The devices must be raw, uninitialized disks!
The manage field must be the host's hostname!
https://github.com/heketi/heketi/issues/786
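Before loading the topology, it is worth confirming on every node that the device really is untouched; a hedged check:
lsblk /dev/sdb    # should show no partitions
wipefs /dev/sdb   # prints nothing when the device carries no signatures
pvs | grep sdb    # should match nothing (no LVM metadata)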
[root@hadoop03 kubernetes]# cat topology.json
{
  "clusters": [
    {
      "nodes": [
        {
          "node": {
            "hostnames": {
              "manage": [
                "hadoop01"
              ],
              "storage": [
                "192.168.153.101"
              ]
            },
            "zone": 1
          },
          "devices": [
            {
              "name": "/dev/sdb",
              "destroydata": false
            }
          ]
        },
        {
          "node": {
            "hostnames": {
              "manage": [
                "hadoop02"
              ],
              "storage": [
                "192.168.153.102"
              ]
            },
            "zone": 1
          },
          "devices": [
            {
              "name": "/dev/sdb",
              "destroydata": false
            }
          ]
        },
        {
          "node": {
            "hostnames": {
              "manage": [
                "hadoop03"
              ],
              "storage": [
                "192.168.153.103"
              ]
            },
            "zone": 1
          },
          "devices": [
            {
              "name": "/dev/sdb",
              "destroydata": false
            }
          ]
        }
      ]
    }
  ]
}
5.2 Load the topology
[root@hadoop03 kubernetes]# export HEKETI_CLI_SERVER=http://`kubectl get svc | grep deploy-heketi | awk '{print $3}'`:8080
Problem 1: if manage is not the host's hostname,
you may hit: Unable to find a GlusterFS pod on host hadoop03 with a label key glusterfs-node, even though the pod does exist.
https://github.com/heketi/heketi/issues/786
Problem 2: if the device being added is not a raw disk,
you may hit: Unable to add device: Initializing device /dev/sdb failed (already initialized or contains data?): Can't open /dev/sdb exclusively. Mounted filesystem?
The heketi log shows the same error (screenshots omitted).
Reference: https://blog.csdn.net/qq_15138049/article/details/122425650?spm=1001.2014.3001.5501
[root@hadoop03 kubernetes]# heketi-cli topology load --json=topology.json --user admin --secret 'My Secret'
Found node hadoop02 on cluster a7903bf6436e2e3df3a1aa1591a5dee0
Adding device /dev/sdb ... OK
Found node hadoop03 on cluster a7903bf6436e2e3df3a1aa1591a5dee0
Adding device /dev/sdb ... OK
[root@hadoop03 kubernetes]#
Problem 3: Can't initialize physical volume "/dev/sdb" of volume group "vg_c373a6d564b78292f6f0a9f4b9a11a3e" without -ff
[root@hadoop01 module]# pvcreate -ff -y /dev/sdb
Physical volume "/dev/sdb" successfully created.
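An alternative to pvcreate -ff is to wipe the stale metadata and let heketi initialize the device itself. Destructive, of course, so double-check the device name:
wipefs -a /dev/sdb                           # drop all filesystem/LVM signatures
dd if=/dev/zero of=/dev/sdb bs=1M count=10   # for good measure, zero the first 10 MiB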
Once the topology finally loads successfully:
[root@hadoop03 kubernetes]# heketi-cli topology info --user admin --secret 'My Secret'
Cluster Id: a7903bf6436e2e3df3a1aa1591a5dee0
File: true
Block: true
Volumes:
Name: vol_257539465ea370c4ca7bd2f9a2c660d2
Size: 1
Id: 257539465ea370c4ca7bd2f9a2c660d2
Cluster Id: a7903bf6436e2e3df3a1aa1591a5dee0
Mount: 192.168.153.103:vol_257539465ea370c4ca7bd2f9a2c660d2
Mount Options: backup-volfile-servers=192.168.153.102
Durability Type: none
Snapshot: Disabled
Bricks:
Id: ef63f12877d7a654852b1b30d68cebb3
Path: /var/lib/heketi/mounts/vg_a9025a600b6960a89593371da0212684/brick_ef63f12877d7a654852b1b30d68cebb3/brick
Size (GiB): 1
Node: c2ebaa92c7467ca407736850585c090a
Device: a9025a600b6960a89593371da0212684
Nodes:
Node Id: a86e4c1eca1928988d9cd926857d0742
State: online
Cluster Id: a7903bf6436e2e3df3a1aa1591a5dee0
Zone: 1
Management Hostnames: hadoop03
Storage Hostnames: 192.168.153.103
Devices:
Id:c373a6d564b78292f6f0a9f4b9a11a3e Name:/dev/sdb State:online Size (GiB):4 Used (GiB):0 Free (GiB):4
Bricks:
Node Id: c2ebaa92c7467ca407736850585c090a
State: online
Cluster Id: a7903bf6436e2e3df3a1aa1591a5dee0
Zone: 1
Management Hostnames: hadoop02
Storage Hostnames: 192.168.153.102
Devices:
Id:a9025a600b6960a89593371da0212684 Name:/dev/sdb State:online Size (GiB):4 Used (GiB):1 Free (GiB):3
Bricks:
Id:ef63f12877d7a654852b1b30d68cebb3 Size (GiB):1 Path: /var/lib/heketi/mounts/vg_a9025a600b6960a89593371da0212684/brick_ef63f12877d7a654852b1b30d68cebb3/brick
[root@hadoop03 kubernetes]#
Next, create heketi's own backing store:
heketi-cli setup-openshift-heketi-storage --user admin --secret 'My Secret' --server http://10.1.135.146:8080
Problem 4: with fewer than 3 GlusterFS nodes, you may hit the following:
[root@hadoop03 kubernetes]# heketi-cli setup-openshift-heketi-storage --user admin --secret 'My Secret'
Error: Failed to allocate new volume: No space
https://blog.lwolf.org/post/how-i-deployed-glusterfs-cluster-to-kubernetes/
Problem 5: Error: /usr/sbin/modprobe failed
[root@hadoop03 kubernetes]# heketi-cli setup-openshift-heketi-storage --user admin --secret 'My Secret' --server http://10.1.135.146:8080
Error: /usr/sbin/modprobe failed: 1
thin: Required device-mapper target(s) not detected in your kernel.
Run `lvcreate --help' for more information.
On all nodes:
[root@hadoop03 kubernetes]# modprobe dm_thin_pool
Problem 6: if the heketi and heketi-cli versions differ, you may hit:
[root@hadoop03 kubernetes]# heketi-cli setup-openshift-heketi-storage --user admin --secret 'My Secret'
Error: /usr/sbin/modprobe failed: 1
thin: Required device-mapper target(s) not detected in your kernel.
Run `lvcreate --help' for more information.
[root@hadoop03 kubernetes]# modprobe dm_thin_pool
[root@hadoop03 kubernetes]# heketi-cli setup-openshift-heketi-storage --user admin --secret 'My Secret'
Error: Failed to allocate new volume: Volume name 'heketidbstorage' already in use
These were my versions (screenshot omitted; see the table in section 1).
https://github.com/heketi/heketi/issues/1608
Problem 7: sometimes Volume name 'heketidbstorage' already in use comes back anyway.
In that case, wipe the GlusterFS state from the nodes along with their mounts and redeploy GlusterFS (and remember to run modprobe dm_snapshot, modprobe dm_mirror, and modprobe dm_thin_pool on all nodes again).
### on each glusterfs node
rm -rf /etc/glusterfs/
rm -rf /var/lib/glusterd/
rm -rf /var/log/glusterfs/
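Deleting the GlusterFS directories alone leaves heketi's bricks and LVM volumes behind, which is exactly what keeps resurrecting heketidbstorage. A hedged cleanup sketch for each storage node (it assumes heketi's default mount root /var/lib/heketi and that every vg_<32-hex> volume group was created by heketi; review the vgs output first):
umount /var/lib/heketi/mounts/vg_*/brick_* 2>/dev/null
rm -rf /var/lib/heketi
# remove only the heketi-created VGs (named vg_<32-hex-id>)
for vg in $(vgs --noheadings -o vg_name | grep -o 'vg_[0-9a-f]\{32\}'); do vgremove -ff "$vg"; done
wipefs -a /dev/sdb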
[root@hadoop03 kubernetes]# heketi-cli setup-openshift-heketi-storage --user admin --secret 'My Secret' --server http://10.1.135.146:8080
Saving heketi-storage.json
In the newly generated heketi-storage.json, change the image to hadoop03:5000/heketi:latest (the default is heketi/heketi:dev).
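Assuming the generated file carries the default heketi/heketi:dev image, a one-liner handles the swap:
sed -i 's#heketi/heketi:dev#hadoop03:5000/heketi:latest#g' heketi-storage.json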
[root@hadoop03 kubernetes]# kubectl apply -f heketi-storage.json
secret/heketi-storage-secret created
endpoints/heketi-storage-endpoints created
service/heketi-storage-endpoints created
job.batch/heketi-storage-copy-job created
[root@hadoop03 kubernetes]# kubectl get pod -A
NAMESPACE NAME READY STATUS RESTARTS AGE
default deploy-heketi-f4fc64774-lrg7k 1/1 Running 0 24m
default glusterfs-cw8jv 1/1 Running 0 10h
default glusterfs-lsgkw 1/1 Running 1 (33m ago) 10h
default glusterfs-phs69 1/1 Running 0 10h
### wait for it to complete ###
default heketi-storage-copy-job--1-x46lk 0/1 Completed 0 2m37s
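Rather than polling kubectl get pod, you can block until the copy job finishes:
kubectl wait --for=condition=complete job/heketi-storage-copy-job --timeout=300s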
Problem 8: this never came up across many installs, but one time the copy job stayed stuck in ContainerCreating.
kubectl describe on its pod showed: mount: unknown filesystem type 'glusterfs'
Reference: https://zhuanlan.zhihu.com/p/51553441
Comparing against nodes from a previous successful install showed that every node needs glusterfs-fuse and glusterfs-api installed:
yum install -y glusterfs-fuse
yum install -y glusterfs-api
Delete the intermediate bootstrap components:
[root@hadoop03 kubernetes]# kubectl delete all,service,jobs,deployment,secret --selector="deploy-heketi"
pod "deploy-heketi-59f8dbc97f-5rf6s" deleted
service "deploy-heketi" deleted
service "heketi" deleted
deployment.apps "deploy-heketi" deleted
replicaset.apps "deploy-heketi-59f8dbc97f" deleted
job.batch "heketi-storage-copy-job" deleted
secret "heketi-storage-secret" deleted
Note: this time it is heketi-deployment.json; the first round used heketi-bootstrap.json.
[root@hadoop03 kubernetes]# kubectl apply -f heketi-deployment.json
secret/heketi-db-backup created
service/heketi created
deployment.apps/heketi created
[root@hadoop03 kubernetes]# kubectl get pod
NAME READY STATUS RESTARTS AGE
glusterfs-cw8jv 1/1 Running 0 10h
glusterfs-lsgkw 1/1 Running 1 (38m ago) 10h
glusterfs-phs69 1/1 Running 0 10h
heketi-66885446bd-l2nmq 1/1 Running 0 7s
[root@hadoop03 kubernetes]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
glusterfs-cluster ClusterIP 10.1.3.109 <none> 1990/TCP 38h
### the heketi svc from heketi-bootstrap.json was deleted above; this is the new one created by heketi-deployment.json
heketi ClusterIP 10.1.107.180 <none> 8080/TCP 38s
heketi-storage-endpoints ClusterIP 10.1.44.68 <none> 1/TCP 7m49s
kubernetes ClusterIP 10.1.0.1 <none> 443/TCP 2d18h
Point heketi-cli at the new service:
export HEKETI_CLI_SERVER=http://10.1.107.180:8080
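Or derive it from the new service instead of hard-coding the ClusterIP:
export HEKETI_CLI_SERVER=http://$(kubectl get svc heketi -o jsonpath='{.spec.clusterIP}'):8080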
Topology info:
[root@hadoop03 kubernetes]# heketi-cli topology info --user admin --secret 'My Secret' --server http://10.1.107.180:8080
Cluster Id: 18ed8f6d4874cb5f4dd2325578e1417d
File: true
Block: true
Volumes:
Name: heketidbstorage
Size: 2
Id: 158e2b7138496265cd751e1fe568539b
Cluster Id: 18ed8f6d4874cb5f4dd2325578e1417d
Mount: 192.168.153.101:heketidbstorage
Mount Options: backup-volfile-servers=192.168.153.103,192.168.153.102
Durability Type: replicate
Replica: 3
Snapshot: Disabled
Bricks:
Id: 16483dcd55dfc7a6cf692e2316edb15a
Path: /var/lib/heketi/mounts/vg_f70cfcd33ecbee32f33fdef8990b430e/brick_16483dcd55dfc7a6cf692e2316edb15a/brick
Size (GiB): 2
Node: dc906c2032a67d446cb31796c4f2d837
Device: f70cfcd33ecbee32f33fdef8990b430e
Id: 62d669a0222f65d8b8726f1cbdf625ba
Path: /var/lib/heketi/mounts/vg_341ad7afe69ff10a262f44790400411a/brick_62d669a0222f65d8b8726f1cbdf625ba/brick
Size (GiB): 2
Node: 2b929d9c36443cfd9c51690c30b1ea9c
Device: 341ad7afe69ff10a262f44790400411a
Id: e98ccade9963a9c722757eefcd4c178f
Path: /var/lib/heketi/mounts/vg_6154b15d65291e82ce54058afafb7c85/brick_e98ccade9963a9c722757eefcd4c178f/brick
Size (GiB): 2
Node: 02c8d1d525267f5614f40686bb422643
Device: 6154b15d65291e82ce54058afafb7c85
Nodes:
Node Id: 02c8d1d525267f5614f40686bb422643
State: online
Cluster Id: 18ed8f6d4874cb5f4dd2325578e1417d
Zone: 1
Management Hostnames: hadoop01
Storage Hostnames: 192.168.153.101
Devices:
Id:6154b15d65291e82ce54058afafb7c85 State:online Size (GiB):4 Used (GiB):2 Free (GiB):2
Known Paths: /dev/sdb
Bricks:
Id:e98ccade9963a9c722757eefcd4c178f Size (GiB):2 Path: /var/lib/heketi/mounts/vg_6154b15d65291e82ce54058afafb7c85/brick_e98ccade9963a9c722757eefcd4c178f/brick
Node Id: 2b929d9c36443cfd9c51690c30b1ea9c
State: online
Cluster Id: 18ed8f6d4874cb5f4dd2325578e1417d
Zone: 1
Management Hostnames: hadoop03
Storage Hostnames: 192.168.153.103
Devices:
Id:341ad7afe69ff10a262f44790400411a State:online Size (GiB):4 Used (GiB):2 Free (GiB):2
Known Paths: /dev/sdb
Bricks:
Id:62d669a0222f65d8b8726f1cbdf625ba Size (GiB):2 Path: /var/lib/heketi/mounts/vg_341ad7afe69ff10a262f44790400411a/brick_62d669a0222f65d8b8726f1cbdf625ba/brick
Node Id: dc906c2032a67d446cb31796c4f2d837
State: online
Cluster Id: 18ed8f6d4874cb5f4dd2325578e1417d
Zone: 1
Management Hostnames: hadoop02
Storage Hostnames: 192.168.153.102
Devices:
Id:f70cfcd33ecbee32f33fdef8990b430e State:online Size (GiB):4 Used (GiB):2 Free (GiB):2
Known Paths: /dev/sdb
Bricks:
Id:16483dcd55dfc7a6cf692e2316edb15a Size (GiB):2 Path: /var/lib/heketi/mounts/vg_f70cfcd33ecbee32f33fdef8990b430e/brick_16483dcd55dfc7a6cf692e2316edb15a/brick
[root@hadoop03 kubernetes]#
At this point:
[root@hadoop03 kubernetes]# kubectl get pod
NAME READY STATUS RESTARTS AGE
glusterfs-cw8jv 1/1 Running 1 (141m ago) 13h
glusterfs-lsgkw 1/1 Running 2 (63m ago) 13h
glusterfs-phs69 1/1 Running 0 13h
heketi-66885446bd-l2nmq 1/1 Running 0 161m
6. Testing dynamic PVC/PV provisioning via the StorageClass
[root@hadoop03 heketi-dynamic-expansion]# cat heketi-storageclass.yaml
apiVersion: v1
kind: Secret
type: kubernetes.io/glusterfs
metadata:
  name: heketi-secret
  #namespace: kube-system
data:
  # base64 encoded. key=My Secret
  key: TXkgU2VjcmV0
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: gluster-heketi
provisioner: kubernetes.io/glusterfs
parameters:
  resturl: "http://10.1.107.180:8080"
  ### without the settings below you may see: Failed to provision volume with StorageClass "gluster-heketi": failed to create volume: failed to create volume: see kube-controller-manager.log for details
  restauthenabled: "true"
  volumetype: "none"
  restuser: "admin"
  secretName: "heketi-secret"
  secretNamespace: "default"
  clusterid: "18ed8f6d4874cb5f4dd2325578e1417d"
  ###
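For the record, the secret's key field is just the heketi admin password base64-encoded:
echo -n 'My Secret' | base64   # -> TXkgU2VjcmV0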
[root@hadoop03 heketi-dynamic-expansion]# cat pod-use-pvc.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-use-pvc
spec:
  containers:
  - name: pod-use-pvc
    image: hadoop03:5000/busybox:latest
    command:
    - sleep
    - "3600"
    volumeMounts:
    - name: gluster-volume
      mountPath: "/pv-data"
      readOnly: false
  volumes:
  - name: gluster-volume
    persistentVolumeClaim:
      claimName: pvc-gluster-heketi
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: pvc-gluster-heketi
spec:
  accessModes: [ "ReadWriteOnce" ]
  storageClassName: "gluster-heketi"
  resources:
    requests:
      storage: 1Gi
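To run the test, apply the manifest and watch the claim bind (a sketch):
kubectl apply -f pod-use-pvc.yaml
kubectl get pvc pvc-gluster-heketi -w   # should move from Pending to Bound
kubectl get pv                          # a dynamically provisioned pvc-... volume appears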
Note: many blogs leave out the settings below and get away with it; for me, the pod stayed stuck in Pending until I added them:
restauthenabled: "true"
volumetype: "none"
restuser: "admin"
secretName: "heketi-secret"
secretNamespace: "default"
clusterid: "18ed8f6d4874cb5f4dd2325578e1417d"
Googling got me nowhere at first: https://giters.com/heketi/heketi/issues/1746
until I found these:
https://tipsfordev.com/glusterfs-heketi-and-kubernetes-auto-provisioning-problem
https://blog.csdn.net/oyym_mv/article/details/86580157
and tried adding those settings to the StorageClass config. Ohhhhh... man, you have no idea how excited I was...
7. Testing
7.1 Topology info
[root@hadoop03 module]# kubectl exec -it pod-use-pvc sh
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
/ # cd /pv-data/
/pv-data # mkdir {1..10}
/pv-data # touch abc
/pv-data # ls
abc {1..10}
[root@hadoop03 module]# heketi-cli --server http://10.1.107.180:8080 --user admin --secret 'My Secret' topology info
Cluster Id: 18ed8f6d4874cb5f4dd2325578e1417d
File: true
Block: true
Volumes:
Name: heketidbstorage
Size: 2
Id: 158e2b7138496265cd751e1fe568539b
Cluster Id: 18ed8f6d4874cb5f4dd2325578e1417d
Mount: 192.168.153.101:heketidbstorage
Mount Options: backup-volfile-servers=192.168.153.103,192.168.153.102
Durability Type: replicate
Replica: 3
Snapshot: Disabled
Bricks:
Id: 16483dcd55dfc7a6cf692e2316edb15a
Path: /var/lib/heketi/mounts/vg_f70cfcd33ecbee32f33fdef8990b430e/brick_16483dcd55dfc7a6cf692e2316edb15a/brick
Size (GiB): 2
Node: dc906c2032a67d446cb31796c4f2d837
Device: f70cfcd33ecbee32f33fdef8990b430e
Id: 62d669a0222f65d8b8726f1cbdf625ba
Path: /var/lib/heketi/mounts/vg_341ad7afe69ff10a262f44790400411a/brick_62d669a0222f65d8b8726f1cbdf625ba/brick
Size (GiB): 2
Node: 2b929d9c36443cfd9c51690c30b1ea9c
Device: 341ad7afe69ff10a262f44790400411a
Id: e98ccade9963a9c722757eefcd4c178f
Path: /var/lib/heketi/mounts/vg_6154b15d65291e82ce54058afafb7c85/brick_e98ccade9963a9c722757eefcd4c178f/brick
Size (GiB): 2
Node: 02c8d1d525267f5614f40686bb422643
Device: 6154b15d65291e82ce54058afafb7c85
Name: vol_455b772c2fc9efcaac1b0b7e21aaa774
Size: 1
Id: 455b772c2fc9efcaac1b0b7e21aaa774
Cluster Id: 18ed8f6d4874cb5f4dd2325578e1417d
Mount: 192.168.153.101:vol_455b772c2fc9efcaac1b0b7e21aaa774
Mount Options: backup-volfile-servers=192.168.153.103,192.168.153.102
Durability Type: replicate
Replica: 3
Snapshot: Enabled
Snapshot Factor: 1.00
Bricks:
Id: 15e1d8b6bc70903ab2d1c8499a4b9144
Path: /var/lib/heketi/mounts/vg_6154b15d65291e82ce54058afafb7c85/brick_15e1d8b6bc70903ab2d1c8499a4b9144/brick
Size (GiB): 1
Node: 02c8d1d525267f5614f40686bb422643
Device: 6154b15d65291e82ce54058afafb7c85
Id: 665f1a008128ecdb97b246d48cbc72d0
Path: /var/lib/heketi/mounts/vg_341ad7afe69ff10a262f44790400411a/brick_665f1a008128ecdb97b246d48cbc72d0/brick
Size (GiB): 1
Node: 2b929d9c36443cfd9c51690c30b1ea9c
Device: 341ad7afe69ff10a262f44790400411a
Id: ea72af0a249b04a5a69e4731c8ac6f64
Path: /var/lib/heketi/mounts/vg_f70cfcd33ecbee32f33fdef8990b430e/brick_ea72af0a249b04a5a69e4731c8ac6f64/brick
Size (GiB): 1
Node: dc906c2032a67d446cb31796c4f2d837
Device: f70cfcd33ecbee32f33fdef8990b430e
Nodes:
Node Id: 02c8d1d525267f5614f40686bb422643
State: online
Cluster Id: 18ed8f6d4874cb5f4dd2325578e1417d
Zone: 1
Management Hostnames: hadoop01
Storage Hostnames: 192.168.153.101
Devices:
Id:6154b15d65291e82ce54058afafb7c85 State:online Size (GiB):4 Used (GiB):3 Free (GiB):1
Known Paths: /dev/sdb
Bricks:
Id:15e1d8b6bc70903ab2d1c8499a4b9144 Size (GiB):1 Path: /var/lib/heketi/mounts/vg_6154b15d65291e82ce54058afafb7c85/brick_15e1d8b6bc70903ab2d1c8499a4b9144/brick
Id:e98ccade9963a9c722757eefcd4c178f Size (GiB):2 Path: /var/lib/heketi/mounts/vg_6154b15d65291e82ce54058afafb7c85/brick_e98ccade9963a9c722757eefcd4c178f/brick
Node Id: 2b929d9c36443cfd9c51690c30b1ea9c
State: online
Cluster Id: 18ed8f6d4874cb5f4dd2325578e1417d
Zone: 1
Management Hostnames: hadoop03
Storage Hostnames: 192.168.153.103
Devices:
Id:341ad7afe69ff10a262f44790400411a State:online Size (GiB):4 Used (GiB):3 Free (GiB):1
Known Paths: /dev/sdb
Bricks:
Id:62d669a0222f65d8b8726f1cbdf625ba Size (GiB):2 Path: /var/lib/heketi/mounts/vg_341ad7afe69ff10a262f44790400411a/brick_62d669a0222f65d8b8726f1cbdf625ba/brick
Id:665f1a008128ecdb97b246d48cbc72d0 Size (GiB):1 Path: /var/lib/heketi/mounts/vg_341ad7afe69ff10a262f44790400411a/brick_665f1a008128ecdb97b246d48cbc72d0/brick
Node Id: dc906c2032a67d446cb31796c4f2d837
State: online
Cluster Id: 18ed8f6d4874cb5f4dd2325578e1417d
Zone: 1
Management Hostnames: hadoop02
Storage Hostnames: 192.168.153.102
Devices:
Id:f70cfcd33ecbee32f33fdef8990b430e State:online Size (GiB):4 Used (GiB):3 Free (GiB):1
Known Paths: /dev/sdb
Bricks:
Id:16483dcd55dfc7a6cf692e2316edb15a Size (GiB):2 Path: /var/lib/heketi/mounts/vg_f70cfcd33ecbee32f33fdef8990b430e/brick_16483dcd55dfc7a6cf692e2316edb15a/brick
Id:ea72af0a249b04a5a69e4731c8ac6f64 Size (GiB):1 Path: /var/lib/heketi/mounts/vg_f70cfcd33ecbee32f33fdef8990b430e/brick_ea72af0a249b04a5a69e4731c8ac6f64/brick
###
[root@hadoop03 module]# heketi-cli --server http://10.1.107.180:8080 --user admin --secret 'My Secret' volume list
Id:158e2b7138496265cd751e1fe568539b Cluster:18ed8f6d4874cb5f4dd2325578e1417d Name:heketidbstorage
Id:455b772c2fc9efcaac1b0b7e21aaa774 Cluster:18ed8f6d4874cb5f4dd2325578e1417d Name:vol_455b772c2fc9efcaac1b0b7e21aaa774
[root@hadoop03 module]#
[root@hadoop03 module]# heketi-cli --server http://10.1.107.180:8080 --user admin --secret 'My Secret' volume info 455b772c2fc9efcaac1b0b7e21aaa774
Name: vol_455b772c2fc9efcaac1b0b7e21aaa774
Size: 1
Volume Id: 455b772c2fc9efcaac1b0b7e21aaa774
Cluster Id: 18ed8f6d4874cb5f4dd2325578e1417d
Mount: 192.168.153.101:vol_455b772c2fc9efcaac1b0b7e21aaa774
Mount Options: backup-volfile-servers=192.168.153.103,192.168.153.102
Block: false
Free Size: 0
Reserved Size: 0
Block Hosting Restriction: (none)
Block Volumes: []
Durability Type: replicate
Distribute Count: 1
Replica Count: 3
Snapshot Factor: 1.00
######### Mount the volume manually ######
[hadoop@hadoop03 testpvc]$ sudo mount -t glusterfs 192.168.153.101:vol_455b772c2fc9efcaac1b0b7e21aaa774 /testpvc
[hadoop@hadoop03 testpvc]$ cd /testpvc/
[root@hadoop03 testpvc]# ll
total 0
drwxr-sr-x 2 root 2000 6 Jan 12 17:06 {1..10}
-rw-r--r-- 1 root 2000 0 Jan 12 17:26 abc
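To double-check the replica-3 layout at the GlusterFS layer, you can also query the volume from inside one of the glusterfs pods (pod and volume names from the listings above):
kubectl exec -it glusterfs-phs69 -- gluster volume info vol_455b772c2fc9efcaac1b0b7e21aaa774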
8. Expanding a PVC after creation
8.1 PVC expansion
Simply edit the PVC's requests.storage from 1Gi to 2Gi.
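One caveat: my StorageClass accepted the resize, but if the edit gets rejected, the StorageClass likely needs allowVolumeExpansion enabled first; a sketch assuming the SC from section 6:
kubectl patch storageclass gluster-heketi -p '{"allowVolumeExpansion": true}'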
###################
### before expansion: 1Gi
###################
[root@hadoop03 heketi-dynamic-expansion]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
glusterfs-nginx Bound gluster-dev-volume 1Gi RWX 43h
pvc-gluster-heketi Bound pvc-fea426ef-b2a0-4797-83a0-f5c039cdb2ce 1Gi RWO gluster-heketi 4h29m
[root@hadoop03 heketi-dynamic-expansion]# cat pod-use-pvc.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-use-pvc
spec:
  containers:
  - name: pod-use-pvc
    image: hadoop03:5000/busybox:latest
    command:
    - sleep
    - "3600"
    volumeMounts:
    - name: gluster-volume
      mountPath: "/pv-data"
      readOnly: false
  volumes:
  - name: gluster-volume
    persistentVolumeClaim:
      claimName: pvc-gluster-heketi
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: pvc-gluster-heketi
spec:
  accessModes: [ "ReadWriteOnce" ]
  storageClassName: "gluster-heketi"
  resources:
    requests:
      storage: 2Gi ### changed to 2Gi for the expansion
[root@hadoop03 heketi-dynamic-expansion]# kubectl apply -f pod-use-pvc.yaml
pod/pod-use-pvc configured
persistentvolumeclaim/pvc-gluster-heketi configured
[root@hadoop03 heketi-dynamic-expansion]#
[root@hadoop03 heketi-dynamic-expansion]# kubectl describe pvc pvc-gluster-heketi
Name: pvc-gluster-heketi
Namespace: default
StorageClass: gluster-heketi
Status: Bound
Volume: pvc-fea426ef-b2a0-4797-83a0-f5c039cdb2ce
Labels: <none>
Annotations: pv.kubernetes.io/bind-completed: yes
pv.kubernetes.io/bound-by-controller: yes
volume.beta.kubernetes.io/storage-provisioner: kubernetes.io/glusterfs
volume.kubernetes.io/storage-resizer: kubernetes.io/glusterfs
Finalizers: [kubernetes.io/pvc-protection]
Capacity: 2Gi
Access Modes: RWO
VolumeMode: Filesystem
Used By: pod-use-pvc
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal VolumeResizeSuccessful 28s volume_expand ExpandVolume succeeded for volume default/pvc-gluster-heketi
###################
### after expansion: 2Gi
###################
[root@hadoop03 heketi-dynamic-expansion]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
glusterfs-nginx Bound gluster-dev-volume 1Gi RWX 43h
pvc-gluster-heketi Bound pvc-fea426ef-b2a0-4797-83a0-f5c039cdb2ce 2Gi RWO gluster-heketi 4h30m
8.2 Attempting to shrink the PVC
[root@hadoop03 heketi-dynamic-expansion]# cat pod-use-pvc.yaml
...
spec:
  accessModes: [ "ReadWriteOnce" ]
  storageClassName: "gluster-heketi"
  resources:
    requests:
      storage: 1Gi ### try to shrink the storage
[root@hadoop03 heketi-dynamic-expansion]# kubectl apply -f pod-use-pvc.yaml
pod/pod-use-pvc configured
The PersistentVolumeClaim "pvc-gluster-heketi" is invalid: spec.resources.requests.storage: Forbidden: field can not be less than previous value
Shrinking is not allowed...
References:
1)https://www.cnblogs.com/dukuan/p/9954094.html
2)https://www.cnblogs.com/zuoyang/p/14948802.html
3)https://blog.lwolf.org/post/how-i-deployed-glusterfs-cluster-to-kubernetes/
4)https://cloud.tencent.com/developer/article/1602930