k8s使用glusterfs+heketi作为后端存储
本文介绍在 Kubernetes 集群中部署 GlusterFS,并使用 Heketi 管理存储卷、对接 StorageClass 实现动态供给的完整过程。
环境准备
# Load the device-mapper kernel modules GlusterFS/Heketi relies on:
# dm_thin_pool for thin-provisioned bricks, dm-snapshot for volume snapshots.
modprobe dm_thin_pool
modprobe dm-snapshot
# Persist the modules across reboots via systemd-modules-load.
cat >/etc/modules-load.d/glusterfs.conf<<EOF
dm-snapshot
dm_thin_pool
EOF
#yum install -y glusterfs-fuse lvm2
# Offline install of the client RPMs (alternative to the yum line above).
yum -y localinstall --disablerepo=* env_package/*.rpm
# If the firewall is enabled, open the GlusterFS/Heketi ports:
# 24007 (glusterd), 24008 (management), 2222 (sshd inside the gluster pod),
# 49152-49251 (one brick port per brick).
iptables -N heketi
iptables -A heketi -p tcp -m state --state NEW -m tcp --dport 24007 -j ACCEPT
iptables -A heketi -p tcp -m state --state NEW -m tcp --dport 24008 -j ACCEPT
iptables -A heketi -p tcp -m state --state NEW -m tcp --dport 2222 -j ACCEPT
iptables -A heketi -p tcp -m state --state NEW -m multiport --dports 49152:49251 -j ACCEPT
# FIX: a user-defined chain is never traversed unless it is referenced from a
# built-in chain; without this jump the rules above have no effect.
iptables -A INPUT -j heketi
service iptables save
安装
- 下载heketi
# Download the heketi client bundle, unpack it, and put heketi-cli on PATH.
HEKETI_VERSION="v8.0.0"
wget "https://github.com/heketi/heketi/releases/download/${HEKETI_VERSION}/heketi-client-${HEKETI_VERSION}.linux.amd64.tar.gz"
tar xf "heketi-client-${HEKETI_VERSION}.linux.amd64.tar.gz"
cp heketi-client/bin/heketi-cli /usr/local/bin
# Sanity check: print the client version.
heketi-cli -v
- 在k8s中部署glusterfs
# The heketi tarball ships ready-made k8s manifests.
cd heketi-client/share/heketi/kubernetes
# If the glusterfs image was pre-pulled locally, change the Always pull policy
# in the manifest before creating it.
kubectl create -f glusterfs-daemonset.json
部署
- 给node节点打label部署daemonset
# The daemonset schedules only onto nodes labelled storagenode=glusterfs;
# label every node that should host a gluster pod.
kubectl get nodes
kubectl label node master node-1 node-2 storagenode=glusterfs
kubectl get pods -o wide
- 部署heketi server并配置权限
# Create heketi's service account and grant it edit rights so it can exec
# gluster commands inside the glusterfs pods.
kubectl create -f heketi-service-account.json
kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account
- 创建 config secret
kubectl create secret generic heketi-config-secret --from-file=./heketi.json
- 初始化部署
# Deploy the temporary bootstrap heketi instance (replaced later by the
# persistent deployment) and check it came up.
kubectl create -f heketi-bootstrap.json
kubectl get pods -o wide
kubectl get svc
- 测试访问
# Resolve the bootstrap pod name. FIX: take only the first match (grep can
# return several lines, e.g. a completed job pod) and quote the expansion so
# port-forward receives a single argument.
HEKETI_BOOTSTRAP_POD=$(kubectl get pods | grep deploy-heketi | head -1 | awk '{print $1}')
kubectl port-forward "$HEKETI_BOOTSTRAP_POD" 30080:8080
# Run from another terminal (port-forward keeps this one busy):
curl http://localhost:30080/hello
- 配置glusterfs
# Topology: one cluster with three nodes (master / node-1 / node-2), each
# contributing the raw block device /dev/sdb. "manage" entries must match the
# kubectl node names; "storage" entries are the storage-network IPs.
cat >topology.json<<EOF
{
"clusters": [
{
"nodes": [
{
"node": {
"hostnames": {
"manage": [
"master"
],
"storage": [
"192.168.214.101"
]
},
"zone": 1
},
"devices": [
{
"name": "/dev/sdb",
"destroydata": false
}
]
},
{
"node": {
"hostnames": {
"manage": [
"node-1"
],
"storage": [
"192.168.214.102"
]
},
"zone": 1
},
"devices": [
{
"name": "/dev/sdb",
"destroydata": false
}
]
},
{
"node": {
"hostnames": {
"manage": [
"node-2"
],
"storage": [
"192.168.214.103"
]
},
"zone": 1
},
"devices": [
{
"name": "/dev/sdb",
"destroydata": false
}
]
}
]
}
]
}
EOF
# Point heketi-cli at the port-forwarded server.
export HEKETI_CLI_SERVER=http://localhost:30080
# Alternatively expose the svc as a NodePort and pass the address with -s:
#heketi-cli -s http://10.16.2.95:30080 topology load --json=topology.json
heketi-cli topology load --json=topology.json
如果出现错误,需初始化磁盘
# Zero the first KiB of the disk (stale partition table / FS signature) and
# make the kernel re-read the partition table.
dd if=/dev/zero of=/dev/sdb bs=1k count=1
blockdev --rereadpt /dev/sdb
若提示 "Can't open /dev/sdb exclusively. Mounted filesystem?",说明设备仍被 device-mapper 占用,按如下步骤清理:
# List device-mapper mappings holding the disk, then remove the offending one
# (replace xxx with the mapping name shown by `dmsetup ls`).
dmsetup ls
ll /dev/mapper/
dmsetup remove xxx
- 使用 Heketi 创建一个用于存储 Heketi 数据库的 volume
# Create the gluster volume that stores heketi's own database; this also
# generates heketi-storage.json (secret + endpoints + copy job).
heketi-cli setup-openshift-heketi-storage
kubectl create -f heketi-storage.json
kubectl get pods
kubectl get job
- 删除部署时产生的相关资源
kubectl delete all,service,jobs,deployment,secret --selector="deploy-heketi"
- 部署 heketi server
# Deploy the permanent heketi server (its DB now lives on the gluster volume).
kubectl create -f heketi-deployment.json
kubectl get pods -o wide
kubectl get svc
- 查看 heketi 状态信息, 配置端口转发 heketi server
# Port-forward to the permanent heketi server. FIX: `grep heketi` alone can
# match several lines (e.g. lingering bootstrap pods), so exclude the
# bootstrap name, keep only the first match, and quote the expansion.
HEKETI_BOOTSTRAP_POD=$(kubectl get pods | grep heketi | grep -v deploy-heketi | head -1 | awk '{print $1}')
kubectl port-forward "$HEKETI_BOOTSTRAP_POD" 30080:8080
# Run the following from another terminal:
export HEKETI_CLI_SERVER=http://localhost:30080
heketi-cli cluster list
heketi-cli volume list
测试
- 创建 StorageClass ,由于没有开启认证,restuser restuserkey 可以随意写
# Cluster IP of the heketi service (3rd column of `kubectl get svc`).
HEKETI_SERVER=$(kubectl get svc | grep heketi | head -1 | awk '{print $3}')
echo $HEKETI_SERVER
# FIX: the here-doc had lost its YAML indentation, which lifts `name`,
# `resturl`, etc. to the top level and yields an unusable StorageClass.
# Restored the intended structure below.
cat >storageclass-glusterfs.yaml<<EOF
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: gluster-heketi
provisioner: kubernetes.io/glusterfs
#reclaimPolicy: Retain
parameters:
  resturl: "http://$HEKETI_SERVER:8080"
  restauthenabled: "false"
  gidMin: "40000"
  gidMax: "50000"
  volumetype: "replicate:3"
#允许对pvc扩容
allowVolumeExpansion: true
EOF
# Register the StorageClass and verify it is listed.
kubectl create -f storageclass-glusterfs.yaml
kubectl get sc
以上创建了一个含有三个副本的gluster的存储类型(storage-class)
volumetype中的replicate必须大于1,否则创建pvc的时候会报错:[heketi] ERROR 2017/11/14 21:35:20/src/github.com/heketi/heketi/apps/glusterfs/app_volume.go:154: Failed to create volume: replica count should be greater than 1
若将storageclass的reclaimPolicy显式指定为Retain(默认情况下是Delete),删除pvc后pv以及后端的volume、brick(lvm)不会被删除。
- 创建pvc
# FIX: restored the here-doc's lost YAML indentation; as scraped, `name`,
# `accessModes`, etc. sat at top level and the PVC manifest was invalid.
# The annotation selects the gluster-heketi StorageClass created above.
cat >gluster-pvc-test.yaml<<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: gluster1
  annotations:
    volume.beta.kubernetes.io/storage-class: gluster-heketi
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
EOF
# Create the PVC; dynamic provisioning should produce a bound PV automatically.
kubectl apply -f gluster-pvc-test.yaml
kubectl get pvc
kubectl get pv
- 创建pod挂载测试
# FIX 1: restored the here-doc's lost YAML indentation (the flat manifest was
# structurally invalid).
# FIX 2: extensions/v1beta1 Deployment was removed in k8s 1.16; apps/v1 is the
# stable API and the required selector is already present.
cat >nginx-pod.yaml<<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-gfs
  labels:
    name: nginx-gfs
spec:
  replicas: 2
  selector:
    matchLabels:
      name: nginx-gfs
  template:
    metadata:
      labels:
        name: nginx-gfs
    spec:
      containers:
      - name: nginx-gfs
        image: nginx:1.14
        imagePullPolicy: IfNotPresent
        ports:
        - name: web
          containerPort: 80
        volumeMounts:
        - name: gfs
          mountPath: /usr/share/nginx/html
      volumes:
      - name: gfs
        persistentVolumeClaim:
          claimName: gluster1
EOF
# Both replicas mount the same PV (ReadWriteMany access mode).
kubectl apply -f nginx-pod.yaml
kubectl get pods -o wide
- 修改文件内容
# FIX: the deployment is named nginx-gfs, so no pod called nginx-pod1 exists;
# resolve a real pod name first, then write a test page through it.
NGINX_POD=$(kubectl get pods | grep nginx-gfs | head -1 | awk '{print $1}')
kubectl exec -ti "$NGINX_POD" -- /bin/sh -c 'echo Hello World from GlusterFS!!! > /usr/share/nginx/html/index.html'
# Access test: with `kubectl get pods -o wide` the pod IP is column 6
# ($(NF-1) is fragile — the column count varies between kubectl versions).
POD_IP=$(kubectl get pods -o wide | grep nginx-gfs | head -1 | awk '{print $6}')
curl "http://$POD_IP"
- node 节点查看文件内容
# Verify the data landed on the backing brick from inside a glusterfs pod.
GLUSTERFS_POD=$(kubectl get pod | grep glusterfs | head -1 | awk '{print $1}')
kubectl exec -ti $GLUSTERFS_POD /bin/sh
# Inside the pod: locate the heketi brick mount and read the file written above
# (vg_/brick_ hashes will differ per environment).
mount | grep heketi
cat /var/lib/heketi/mounts/vg_56033aa8a9131e84faa61a6f4774d8c3/brick_1ac5f3a0730457cf3fcec6d881e132a2/brick/index.html
- pvc扩容
使用 kubectl edit 更新 pvc 的存储容量(spec.resources.requests.storage)即可触发扩容
扩容glusterfs
- 使用heketi-cli查看cluster ID和所有node ID
[root@k8s-1 ~]# heketi-cli cluster info
Error: Cluster id missing
[root@k8s-1 ~]# heketi-cli cluster list
Clusters:
Id:5dec5676c731498c2bdf996e110a3e5e [file][block]
[root@k8s-1 ~]# heketi-cli cluster info 5dec5676c731498c2bdf996e110a3e5e
Cluster id: 5dec5676c731498c2bdf996e110a3e5e
Nodes:
0f00835397868d3591f45432e432ba38
d38819746cab7d567ba5f5f4fea45d91
fb181b0cef571e9af7d84d2ecf534585
Volumes:
32146a51be9f980c14bc86c34f67ebd5
56d636b452d31a9d4cb523d752ad0891
828dc2dfaa00b7213e831b91c6213ae4
b9c68075c6f20438b46db892d15ed45a
Block: true
File: true
- 找到对应node ID
[root@k8s-1 ~]# heketi-cli node info 0f00835397868d3591f45432e432ba38
Node Id: 0f00835397868d3591f45432e432ba38
State: online
Cluster Id: 5dec5676c731498c2bdf996e110a3e5e
Zone: 1
Management Hostname: k8s-node02
Storage Hostname: 192.168.186.12
Devices:
Id:82af8e5f2fb2e1396f7c9e9f7698a178 Name:/dev/sdb State:online Size (GiB):39 Used (GiB):25 Free (GiB):14 Bricks:4
- 添加磁盘
[root@k8s-1 ~]# heketi-cli device add --name=/dev/sdc --node=0f00835397868d3591f45432e432ba38
Device added successfully
- 查看增加结果
heketi-cli node info 0f00835397868d3591f45432e432ba38
更多推荐
已为社区贡献3条内容
所有评论(0)