Kubernetes Cluster Storage
1. NFS
First, check that the k8s cluster is healthy:
[root@k8s-master pod]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master Ready master 6d10h v1.17.0
k8s-node1 Ready <none> 6d10h v1.17.0
k8s-node2 Ready <none> 6d10h v1.17.0
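Besides node status, the control-plane components can be given a quick check as well (a sketch; kubectl get cs still works on v1.17, though it is deprecated in newer releases):
[root@k8s-master pod]# kubectl cluster-info
[root@k8s-master pod]# kubectl get cs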
Install NFS on a separate machine. A CentOS 7 graphical installation already includes the NFS server packages by default; if they are missing, install them:
[root@client ~]# yum install -y nfs-utils rpcbind
Loaded plugins: fastestmirror, langpacks
Loading mirror speeds from cached hostfile
* base: mirrors.aliyun.com
* extras: mirrors.aliyun.com
* updates: mirrors.aliyun.com
Resolving Dependencies
--> Running transaction check
---> Package nfs-utils.x86_64 1:1.3.0-0.8.el7 will be updated
---> Package nfs-utils.x86_64 1:1.3.0-0.66.el7 will be an update
--> Processing Dependency: libtirpc >= 0.2.4-0.7 for package: 1:nfs-utils-1.3.0-0.66.el7.x86_64
--> Processing Dependency: gssproxy >= 0.7.0-3 for package: 1:nfs-utils-1.3.0-0.66.el7.x86_64
---> Package rpcbind.x86_64 0:0.2.0-26.el7 will be updated
---> Package rpcbind.x86_64 0:0.2.0-49.el7 will be an update
..............................
Updated:
nfs-utils.x86_64 1:1.3.0-0.66.el7 rpcbind.x86_64 0:0.2.0-49.el7
Dependency Updated:
gssproxy.x86_64 0:0.7.0-28.el7 krb5-libs.x86_64 0:1.15.1-46.el7 libcollection.x86_64 0:0.7.0-32.el7 libini_config.x86_64 0:1.3.1-32.el7 libtirpc.x86_64 0:0.2.4-0.16.el7
Complete!
After installation, create the shared directory, set its permissions and ownership, and configure the export in /etc/exports (rw allows read-write access, no_root_squash keeps client root as root, sync forces synchronous writes). Then disable the firewall and SELinux and restart the services:
[root@client ~]# mkdir /nfs1
[root@client ~]# chmod 777 /nfs1
[root@client ~]# chown nfsnobody /nfs1
[root@client ~]# cat /etc/exports
/nfs1 *(rw,no_root_squash,no_all_squash,sync)
[root@client ~]# systemctl stop firewalld.service
[root@client ~]# setenforce 0
[root@client ~]# systemctl restart nfs rpcbind
[root@client ~]#
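Two optional follow-ups on the NFS server, as a sketch (assuming the standard CentOS 7 unit names): re-read /etc/exports without a restart, and enable the services at boot:
[root@client ~]# exportfs -rav                        # re-export everything listed in /etc/exports
[root@client ~]# systemctl enable nfs-server rpcbind  # start both services automatically at boot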
Verify from any other node that the export can be mounted and used:
[root@k8s-node1 ~]# showmount -e 192.168.40.13
Export list for 192.168.40.13:
/nfs1 *
[root@k8s-node1 ~]# mkdir /nfs
[root@k8s-node1 ~]# mount -t nfs 192.168.40.13:/nfs1 /nfs
[root@k8s-node1 ~]# df -hT|grep nfs
192.168.40.13:/nfs1 nfs4 37G 5.8G 31G 16% /nfs
[root@k8s-node1 ~]#
[root@k8s-node1 ~]# umount /nfs
Create the PV on the master node:
[root@k8s-master pod]# cat pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfspv1
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: nfs
  nfs:
    path: /nfs1
    server: 192.168.40.13
[root@k8s-master pod]# kubectl create -f pv.yaml
[root@k8s-master pod]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
nfspv1 1Gi RWO Retain Bound default/www-web-0 nfs 33m
[root@k8s-master pod]#
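To see which claim a PV is bound to and the NFS source behind it, kubectl describe can also be used (output omitted here):
[root@k8s-master pod]# kubectl describe pv nfspv1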
Create the PVC. Here a headless Service and a StatefulSet are defined, and the PVC is generated from the StatefulSet's volumeClaimTemplates:
[root@k8s-master pod]# cat pvc.yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  ports:
  - port: 80
    name: web
  clusterIP: None
  selector:
    app: nginx
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  selector:
    matchLabels:
      app: nginx
  serviceName: "nginx"
  replicas: 1
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.8
        ports:
        - containerPort: 80
          name: web
        volumeMounts:
        - name: www
          mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
  - metadata:
      name: www
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: "nfs"
      resources:
        requests:
          storage: 1Gi
Apply the manifest and check the result:
[root@k8s-master pod]# kubectl create -f pvc.yaml
[root@k8s-master pod]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
www-web-0 Bound nfspv1 1Gi RWO nfs 36m
[root@k8s-master pod]#
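The PVC www-web-0 above was generated automatically by the StatefulSet's volumeClaimTemplates. For comparison, a standalone PVC requesting the same class and size would look roughly like this (a sketch; the name my-nfs-claim is made up):
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: my-nfs-claim        # hypothetical name
spec:
  accessModes:
  - ReadWriteOnce
  storageClassName: nfs
  resources:
    requests:
      storage: 1Gi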
Enter the pod and verify that data written there is persisted to the NFS backend.
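The shell session below runs inside the pod; it can be opened with something along these lines (the exact command is not part of the original output):
[root@k8s-master pod]# kubectl exec -it web-0 -- /bin/bash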
root@web-0:/# cd /usr/share/nginx/html/
root@web-0:/usr/share/nginx/html# ls
index.html
root@web-0:/usr/share/nginx/html# date > index.html
root@web-0:/usr/share/nginx/html# cat index.html
Mon Jul 27 13:23:56 UTC 2020
root@web-0:/usr/share/nginx/html#
Check the content on the NFS server side:
[root@client ~]# cd /nfs1/
[root@client nfs1]# ls
index.html
[root@client nfs1]# cat index.html
Mon Jul 27 13:23:56 UTC 2020
[root@client nfs1]#
Access the pod from inside the k8s cluster:
[root@k8s-master pod]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
web-0 1/1 Running 0 41m 10.244.1.46 k8s-node1 <none> <none>
[root@k8s-master pod]# curl 10.244.1.46
Mon Jul 27 13:23:56 UTC 2020
[root@k8s-master pod]#
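The point of the PV is that the data outlives the pod. A quick way to confirm this, as a sketch (the recreated pod will get a different IP):
[root@k8s-master pod]# kubectl delete pod web-0    # the StatefulSet recreates web-0 automatically
[root@k8s-master pod]# kubectl get pod -o wide     # wait until the new web-0 is Running and note its IP
[root@k8s-master pod]# curl <new-pod-ip>           # still returns the date written earlier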
As the experiment above shows, a k8s cluster can store data in several ways; NFS is only one of them. Other options include distributed storage, block storage, and more.
2. hostPath
[root@k8s-master yaml]# cat hostpath.yaml
apiVersion: v1
kind: Pod
metadata:
  name: test-pd
spec:
  containers:
  - image: nginx:1.8
    name: test-container
    volumeMounts:
    - mountPath: /usr/share/nginx/html
      name: test-volume
  volumes:
  - name: test-volume
    hostPath:
      path: /web
      type: DirectoryOrCreate
[root@k8s-master yaml]#
Create a test file on node 1:
[root@k8s-node1 ~]# mkdir /web
[root@k8s-node1 ~]# echo k8s-node1 > /web/index.html
Create a test file on node 2:
[root@k8s-node2 ~]# mkdir /web
[root@k8s-node2 ~]# echo k8s-node2 > /web/index.html
Apply the YAML file and test access:
[root@k8s-master yaml]# kubectl create -f hostpath.yaml
pod/test-pd created
[root@k8s-master yaml]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
test-pd 1/1 Running 0 6s 10.244.2.5 k8s-node2 <none> <none>
[root@k8s-master yaml]# while true; do curl 10.244.2.5;sleep 2; done
k8s-node2
k8s-node2
k8s-node2
k8s-node2
^C
[root@k8s-master yaml]#
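Because hostPath is node-local, the content served depends entirely on which node the scheduler picks (here the pod landed on k8s-node2 and served that node's file). To make the example deterministic, the pod can be pinned to a node, for instance with nodeName; a sketch (the pod name test-pd-node1 is made up):
apiVersion: v1
kind: Pod
metadata:
  name: test-pd-node1        # hypothetical name
spec:
  nodeName: k8s-node1        # schedule this pod onto k8s-node1 explicitly
  containers:
  - image: nginx:1.8
    name: test-container
    volumeMounts:
    - mountPath: /usr/share/nginx/html
      name: test-volume
  volumes:
  - name: test-volume
    hostPath:
      path: /web
      type: DirectoryOrCreate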
3. emptyDir
[root@k8s-master yaml]# cat pod-demo-vol.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-demo
  namespace: default
  labels:
    app: myapp
    tier: frontend
  annotations:
    jingwei.com/create-by: "cluster admin"
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
    ports:
    - containerPort: 80
    volumeMounts:
    - name: html
      mountPath: /data/web/html
  - name: busybox
    image: busybox
    imagePullPolicy: IfNotPresent
    volumeMounts:
    - name: html
      mountPath: /data/
    command:
    - "/bin/sh"
    - "-c"
    - "sleep 7200"
  volumes:
  - name: html
    emptyDir: {}
Write some data from the busybox container, then read it from the myapp container to confirm the two containers share the same emptyDir volume:
[root@k8s-master yaml]# kubectl exec -it pod-demo -c busybox -- /bin/sh
/ # ls
bin data dev etc home proc root sys tmp usr var
/ # date > data/index.html
/ # date >> data/index.html
/ # date >> data/index.html
/ # date >> data/index.html
/ # cat data/index.html
Tue Sep 1 07:34:24 UTC 2020
Tue Sep 1 07:34:29 UTC 2020
Tue Sep 1 07:34:31 UTC 2020
Tue Sep 1 07:34:32 UTC 2020
/ #
[root@k8s-master yaml]# kubectl exec -it pod-demo -c myapp -- /bin/sh
/ # ls
bin dev home media proc run srv tmp var
data etc lib mnt root sbin sys usr
/ # cat data/web/html/index.html
Tue Sep 1 07:34:24 UTC 2020
Tue Sep 1 07:34:29 UTC 2020
Tue Sep 1 07:34:31 UTC 2020
Tue Sep 1 07:34:32 UTC 2020
/ #
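An emptyDir is created on the node's disk when the pod starts and removed when the pod is deleted. It can also be backed by memory (tmpfs) by setting medium: Memory; a minimal sketch (the pod name and size limit are assumptions):
apiVersion: v1
kind: Pod
metadata:
  name: pod-demo-mem         # hypothetical name
spec:
  containers:
  - name: busybox
    image: busybox
    imagePullPolicy: IfNotPresent
    command: ["/bin/sh", "-c", "sleep 7200"]
    volumeMounts:
    - name: cache
      mountPath: /cache
  volumes:
  - name: cache
    emptyDir:
      medium: Memory         # tmpfs; contents count against the container's memory usage
      sizeLimit: 64Mi        # optional cap on the volume size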