Learning k8s: Testing Integration with Ceph
Kubernetes PV/PVC binding flow:
Create a PV, specifying the backend storage type (NFS, Ceph RBD, etc.) -> create a PVC that requests the resources it needs -> the PVC binds to a matching PV -> a Pod mounts the PVC for persistent storage.
Kubernetes StorageClass flow for dynamically provisioning PVs:
First create a StorageClass -> a PVC references that StorageClass, which automatically creates a PV -> in this way a PV is generated dynamically through the StorageClass.
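As a quick way to observe both flows with the resources created later in this post (just a sketch), you can check binding status from the master:
[root@master ~]# kubectl get pv,pvc          # a statically created PV shows Available, then Bound once a PVC matches it
[root@master ~]# kubectl get storageclass    # with dynamic provisioning, PVs appear automatically once a PVC references the class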
I. Test a Pod mounting a Ceph RBD directly
1. For Kubernetes to use Ceph, ceph-common must be installed on every k8s node:
[root@ms-adm ceph]# scp /etc/yum.repos.d/ceph.repo root@192.168.1.11:/etc/yum.repos.d/
[root@master ~]# yum install -y yum-utils && yum-config-manager --add-repo https://dl.fedoraproject.org/pub/epel/7/x86_64/
[root@master ~]# yum install --nogpgcheck -y epel-release && rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
[root@master ~]# rm -f /etc/yum.repos.d/dl.fedoraproject.org*
[root@master ~]# yum -y install ceph-common
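A quick sanity check that the client tools actually landed on the node (optional):
[root@master ~]# ceph --version
[root@master ~]# which rbd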
2. Copy the Ceph configuration files to every k8s node (run on the Ceph admin node):
[root@ms-adm ceph]# scp /etc/ceph/* 192.168.1.11:/etc/ceph/
root@192.168.1.11's password:
ceph.client.admin.keyring 100% 129 85.8KB/s 00:00
ceph.conf 100% 289 232.2KB/s 00:00
rbdmap 100% 92 48.6KB/s 00:00
tmp2_ragB 100% 0 0.0KB/s 00:00
3. Test a Pod directly mounting a Ceph volume
Run the following on the Ceph admin node (ms-adm):
[root@ms-adm ceph]# ceph osd pool create k8srbd 256
pool 'k8srbd' created
[root@ms-adm ceph]# ceph osd lspools
0 rbd,1 k8srbd,
[root@ms-adm ceph]# rbd create rbda -s 1024 -p k8srbd
[root@ms-adm ceph]# rbd feature disable k8srbd/rbda object-map fast-diff deep-flatten
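object-map, fast-diff and deep-flatten are disabled here because the CentOS 7 kernel RBD client cannot map images that have those features enabled. You can confirm what remains with rbd info (typically layering and exclusive-lock, depending on the Ceph version):
[root@ms-adm ceph]# rbd info k8srbd/rbda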
4. Test a Pod mounting the Ceph RBD created above
[root@master ~]# cat pod-test.yaml
apiVersion: v1
kind: Pod
metadata:
  name: testrbd
spec:
  containers:
  - image: nginx
    name: nginx
    volumeMounts:
    - name: testrbd
      mountPath: /mnt
  volumes:
  - name: testrbd
    rbd:
      monitors:
      - '192.168.1.16:6789'   # with multiple monitors for HA, list each one, e.g. - '192.168.1.17:6789'
      pool: k8srbd            # the pool created above
      image: rbda             # the image created above
      fsType: xfs             # filesystem type the image is formatted with
      readOnly: false
      user: admin
      keyring: /etc/ceph/ceph.client.admin.keyring
[root@master ~]# kubectl apply -f pod-test.yaml
[root@master ~]# kubectl describe pod testrbd
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 8s default-scheduler Successfully assigned default/testrbd to node1
Normal SuccessfulAttachVolume 8s attachdetach-controller AttachVolume.Attach succeeded for volume "testrbd"
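A sketch of how to verify the RBD volume is really mounted, using the pod and node shown in the events above:
[root@master ~]# kubectl exec testrbd -- df -h /mnt
[root@node1 ~]# rbd showmapped       # on node1, lists the kernel-mapped RBD device backing the mount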
II. Creating a PV and PVC backed by Ceph RBD
1. Create the ceph-secret Secret object; the k8s volume plugin uses it to access the Ceph cluster.
Get the client.admin key and base64-encode it (run on the Ceph admin node):
[root@ms-adm ceph]# ceph auth get-key client.admin | base64
QVFBbW9FOWlzRGVzRHhBQSsrbkJwdlNBVlNuMHc2RVU5L0VYWFE9PQ==
2. Create the Ceph Secret on the k8s master:
[root@master ~]# cat ceph-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret
data:
  key: QVFBbW9FOWlzRGVzRHhBQSsrbkJwdlNBVlNuMHc2RVU5L0VYWFE9PQ==
[root@master ~]# kubectl apply -f ceph-secret.yaml
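As an alternative to hand-encoding the key in YAML, kubectl can build the same Secret straight from the Ceph admin key (a sketch; it assumes ceph-common and /etc/ceph/ceph.client.admin.keyring are present on the master, as set up above):
[root@master ~]# kubectl create secret generic ceph-secret --from-literal=key="$(ceph auth get-key client.admin)"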
3. Back on the Ceph admin node, create another pool:
[root@ms-adm ceph]# ceph osd pool create k8stest 256
[root@ms-adm ceph]# rbd create rbda -s 1024 -p k8stest
[root@ms-adm ceph]# rbd feature disable k8stest/rbda object-map fast-diff deep-flatten
4. Create the Ceph PV
[root@master ~]# cat pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: ceph-pv
spec:
  capacity:
    storage: 1Gi
  accessModes:
  - ReadWriteOnce
  rbd:
    monitors:
    - 192.168.1.16:6789
    pool: k8stest
    image: rbda
    user: admin
    secretRef:
      name: ceph-secret
    fsType: xfs
    readOnly: false
  persistentVolumeReclaimPolicy: Recycle
[root@master ~]# kubectl apply -f pv.yaml
persistentvolume/ceph-pv created
[root@master ~]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
ceph-pv 1Gi RWO Recycle Available 6s
5. Create the Ceph PVC
[root@master ~]# cat pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: ceph-pvc
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
[root@master ~]# kubectl apply -f pvc.yaml
persistentvolumeclaim/ceph-pvc created
[root@master ~]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
ceph-pvc Bound ceph-pv 1Gi RWO 6s
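Once the PVC is bound, the PV's status flips from Available to Bound as well:
[root@master ~]# kubectl get pv ceph-pv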
6. Mount the PVC in a Pod
[root@master ~]# cat pod.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  selector:
    matchLabels:
      app: nginx
  replicas: 1
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: "/ceph-data"
          name: ceph-data
      volumes:
      - name: ceph-data
        persistentVolumeClaim:
          claimName: ceph-pvc
[root@master ~]# kubectl apply -f pod.yaml
[root@master ~]# kubectl describe pod nginx-deployment-b78bc4699-krgvj
...
Mounts:
/ceph-data from ceph-data (rw)
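To confirm writes really land on the RBD image, a quick test through the deployment's pod (the pod name hash will differ in your environment):
[root@master ~]# kubectl exec nginx-deployment-b78bc4699-krgvj -- sh -c 'echo hello > /ceph-data/test.txt'
[root@master ~]# kubectl exec nginx-deployment-b78bc4699-krgvj -- cat /ceph-data/test.txt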
III. Dynamically generating PVs with a StorageClass
1. Create the RBD provisioner
Reference: https://github.com/kubernetes-incubator/external-storage/tree/master/ceph/rbd/deploy/rbac
[root@master ~]# cat rbd-provisioner.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
rules:
- apiGroups: [""]
  resources: ["persistentvolumes"]
  verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
  resources: ["persistentvolumeclaims"]
  verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create", "update", "patch"]
- apiGroups: [""]
  resources: ["services"]
  resourceNames: ["kube-dns","coredns"]
  verbs: ["list", "get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
subjects:
- kind: ServiceAccount
  name: rbd-provisioner
  namespace: default
roleRef:
  kind: ClusterRole
  name: rbd-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: rbd-provisioner
rules:
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get"]
- apiGroups: [""]
  resources: ["endpoints"]
  verbs: ["get", "list", "watch", "create", "update", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: rbd-provisioner
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rbd-provisioner
subjects:
- kind: ServiceAccount
  name: rbd-provisioner
  namespace: default
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rbd-provisioner
spec:
  selector:
    matchLabels:
      app: rbd-provisioner
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: rbd-provisioner
    spec:
      containers:
      - name: rbd-provisioner
        image: quay.io/external_storage/rbd-provisioner:latest
        env:
        - name: PROVISIONER_NAME
          value: ceph.com/rbd
      serviceAccount: rbd-provisioner
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rbd-provisioner
[root@master ~]# kubectl apply -f rbd-provisioner.yaml
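Before moving on, make sure the provisioner pod is actually Running; its logs will also show each provisioning request later:
[root@master ~]# kubectl get pods -l app=rbd-provisioner
[root@master ~]# kubectl logs deploy/rbd-provisioner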
2. Create ceph-secret
[root@master ~]# cat ceph-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret
type: "ceph.com/rbd"
data:
  key: QVFBczlGOWRCVTkrSXhBQThLa1k4VERQQjhVT29wd0FnZkNDQmc9PQ==
[root@master ~]# kubectl apply -f ceph-secret.yaml
3. Create the StorageClass
[root@master ~]# cat storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: k8s-rbd
provisioner: ceph.com/rbd
parameters:
  monitors: 192.168.1.16:6789
  adminId: admin
  adminSecretName: ceph-secret
  pool: k8stest
  userId: admin
  userSecretName: ceph-secret
  fsType: xfs
  imageFormat: "2"
  imageFeatures: "layering"
[root@master ~]# kubectl apply -f storageclass.yaml
[root@master ~]# kubectl get storageclass
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
k8s-rbd ceph.com/rbd Delete Immediate false 2m2s
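Optionally, the class can be marked as the cluster default so that PVCs without an explicit storageClassName use it (standard annotation, adjust as needed):
[root@master ~]# kubectl patch storageclass k8s-rbd -p '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'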
4. Create a PVC
[root@master ~]# cat rbd-pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: rbd-pvc
spec:
  accessModes:
  - ReadWriteOnce
  volumeMode: Filesystem
  resources:
    requests:
      storage: 1Gi
  storageClassName: k8s-rbd
[root@master ~]# kubectl apply -f rbd-pvc.yaml
[root@master ~]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
rbd-pvc Bound pvc-c5568868-cf9b-4754-862b-55b25d9353ff 1Gi RWO k8s-rbd 6s
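On the Ceph admin node, the image the provisioner just created should be visible in the k8stest pool (the image name is auto-generated, typically kubernetes-dynamic-pvc-<uuid>):
[root@ms-adm ceph]# rbd ls -p k8stest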
5. Create a Pod
[root@master ~]# cat pod-sto.yaml
apiVersion: v1
kind: Pod
metadata:
  labels:
    test: rbd-pod
  name: ceph-rbd-pod
spec:
  containers:
  - name: ceph-rbd-nginx
    image: nginx
    volumeMounts:
    - name: ceph-rbd
      mountPath: /mnt
      readOnly: false
  volumes:
  - name: ceph-rbd
    persistentVolumeClaim:
      claimName: rbd-pvc
[root@master ~]# kubectl apply -f pod-sto.yaml
[root@master ~]# kubectl get pod
NAME READY STATUS RESTARTS AGE
ceph-rbd-pod 1/1 Running 0 42s
rbd-provisioner-c968dcb4b-nhp2g 1/1 Running 0 13m
6. Stateful services on K8s: StatefulSet + Ceph best practice
[root@master ~]# cat state.yaml
apiVersion: v1
kind: Service
metadata:
  name: storage
  labels:
    app: storage
spec:
  ports:
  - port: 80
    name: web
  clusterIP: None
  selector:
    app: storage
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: storage
spec:
  serviceName: "storage"
  replicas: 2
  selector:
    matchLabels:
      app: storage
  template:
    metadata:
      labels:
        app: storage
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
          name: web
        volumeMounts:
        - name: www
          mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
  - metadata:
      name: www
    spec:
      accessModes: ["ReadWriteOnce"]
      volumeMode: Filesystem
      storageClassName: k8s-rbd
      resources:
        requests:
          storage: 1Gi
[root@master ~]# kubectl apply -f state.yaml
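Each StatefulSet replica gets its own PVC generated from volumeClaimTemplates, named <template>-<statefulset>-<ordinal> (www-storage-0 and www-storage-1 here), so each nginx pod writes to its own RBD-backed volume:
[root@master ~]# kubectl get pvc
[root@master ~]# kubectl get pods -l app=storage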
IV. Mounting a CephFS filesystem
1. Create a CephFS subdirectory
So that CephFS can be mounted from other hosts, first create a secret file:
[root@ms-adm ~]# cat /etc/ceph/ceph.client.admin.keyring | grep key | awk -F " " '{print $3}' > /etc/ceph/admin.secret
Mount the CephFS root onto a directory on the cluster's mon node, for example test_data; once it is mounted we can create subdirectories under test_data with ordinary Linux commands.
[root@ms-adm ~]# mkdir test_data
[root@ms-adm ~]# mount -t ceph 192.168.1.11:6789:/ /root/test_data -o name=admin,secretfile=/etc/ceph/admin.secret
[root@ms-adm ~]# df -h
192.168.1.11:6789:/ 15G 106M 15G 1% /root/test_data
Create a subdirectory kube inside the CephFS root; k8s can mount this directory later:
[root@ms-adm ~]# cd /root/test_data
[root@ms-adm ~]# mkdir /root/test_data/kube
[root@ms-adm ~]# chmod 0777 /root/test_data/kube
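With the kube subdirectory prepared, a pod can mount it through the in-tree cephfs volume type. A minimal sketch (file name and manifest are illustrative; it reuses the ceph-secret Secret created earlier and assumes the monitor address matches your cluster):
[root@master ~]# cat cephfs-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: cephfs-test
spec:
  containers:
  - name: nginx
    image: nginx
    volumeMounts:
    - name: cephfs
      mountPath: /mnt
  volumes:
  - name: cephfs
    cephfs:
      monitors:
      - 192.168.1.16:6789
      path: /kube              # the subdirectory created above
      user: admin
      secretRef:
        name: ceph-secret
      readOnly: false
[root@master ~]# kubectl apply -f cephfs-pod.yaml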