I. Install GlusterFS

GlusterFS installation reference: https://www.cnblogs.com/zhangb8042/p/7801181.html

Environment:

CentOS 7

[root@k8s-m ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
172.31.250.144 k8s-m
172.31.250.145 node
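
The installation itself follows the guide linked above; a minimal sketch for CentOS 7, run on both k8s-m and node (package names assume the CentOS Storage SIG GlusterFS repository):

# On both k8s-m and node
yum install -y centos-release-gluster   # enables the Storage SIG GlusterFS repo
yum install -y glusterfs-server
systemctl start glusterd                # GlusterFS management daemon
systemctl enable glusterd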

 

 

1. Configure the trusted storage pool between the two nodes and verify it:

[root@k8s-m ~]# gluster peer status
Number of Peers: 1

Hostname: node
Uuid: 550bc83e-e15b-40da-9f63-b468d6c7bdb9
State: Peer in Cluster (Connected)
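
For reference, the trusted pool shown above is built with a single probe from either node; a minimal sketch, assuming the hostnames in /etc/hosts resolve on both machines:

# On k8s-m: add the other host to the trusted storage pool
gluster peer probe node
# Then verify from either side
gluster peer status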

2. Create the brick directory (on both nodes)
mkdir /data

3. Create a GlusterFS replicated volume (force is needed here because the bricks sit on the root partition)
[root@node yum.repos.d]# gluster volume create gv0 replica 2 k8s-m:/data   node:/data  force
volume create: gv0: success: please start the volume to access data

4. Start the volume
[root@node yum.repos.d]# gluster volume start gv0
volume start: gv0: success
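
Besides volume status, gluster volume info shows how the volume is laid out; a quick check sketch:

[root@k8s-m ~]# gluster volume info gv0
# Expect Type: Replicate, Number of Bricks: 1 x 2 = 2,
# and the two bricks k8s-m:/data and node:/data listed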

5. Check the volume status
[root@k8s-m ~]# gluster volume status gv0
Status of volume: gv0
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick k8s-m:/data                           49152     0          Y       7925 
Brick node:/data                            49152     0          Y       18592
Self-heal Daemon on localhost               N/A       N/A        Y       7948 
Self-heal Daemon on node                    N/A       N/A        Y       18615
 
Task Status of Volume gv0
------------------------------------------------------------------------------
There are no active volume tasks
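
Note that the kubelet mounts GlusterFS volumes with mount.glusterfs, so every Kubernetes node that may run pods using this volume needs the GlusterFS client packages installed; a minimal sketch for CentOS 7:

# On every Kubernetes node
yum install -y glusterfs glusterfs-fuse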

II. Kubernetes configuration

1. Configure the Endpoints that point at the GlusterFS servers (the port value is required by the Endpoints schema but is effectively a placeholder for the GlusterFS mount; 1000 is used here)

[root@k8s-m ~]# cat glusterfs-endpoints.json 
{
  "kind": "Endpoints",
  "apiVersion": "v1",
  "metadata": {
    "name": "glusterfs-cluster"
  },
  "subsets": [
    {
      "addresses": [
        {
          "ip": "172.31.250.144"
        }
      ],
      "ports": [
        {
          "port": 1000
        }
      ]
    },
    {
      "addresses": [
        {
          "ip": "172.31.250.145"
        }
      ],
      "ports": [
        {
          "port": 1000
        }
      ]
    }
  ]
}


# Apply
kubectl apply -f glusterfs-endpoints.json
# Check
[root@k8s-m ~]# kubectl get ep
NAME                ENDPOINTS                                 AGE
glusterfs-cluster   172.31.250.144:1000,172.31.250.145:1000   17m
kubernetes          172.31.250.144:6443                       24m

  

2. Configure a matching Service (this keeps the Endpoints object persistent)

[root@k8s-m ~]# cat glusterfs-service.json 
{
  "kind": "Service",
  "apiVersion": "v1",
  "metadata": {
    "name": "glusterfs-cluster"
  },
  "spec": {
    "ports": [
      {"port": 1000}
    ]
  }
}

# Apply
kubectl apply -f glusterfs-service.json
# Check
[root@k8s-m ~]# kubectl  get svc 
NAME                TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
glusterfs-cluster   ClusterIP   10.105.177.109   <none>        1000/TCP   17m
kubernetes          ClusterIP   10.96.0.1        <none>        443/TCP    24m

 

3. Create a test Pod (the path below is gv0, the GlusterFS volume created earlier)

[root@k8s-m ~]# cat glusterfs-pod.json 
{
    "apiVersion": "v1",
    "kind": "Pod",
    "metadata": {
        "name": "glusterfs"
    },
    "spec": {
        "containers": [
            {
                "name": "glusterfs",
                "image": "nginx",
                "volumeMounts": [
                    {
                        "mountPath": "/mnt/glusterfs",
                        "name": "glusterfsvol"
                    }
                ]
            }
        ],
        "volumes": [
            {
                "name": "glusterfsvol",
                "glusterfs": {
                    "endpoints": "glusterfs-cluster",
                    "path": "gv0", #之前创建的glusterfs卷名
                    "readOnly": true
                }
            }
        ]
    }
}

# Apply
kubectl apply -f glusterfs-pod.json
# Check
kubectl  get pod
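
To confirm the pod really mounted gv0 (read-only, as set in the manifest above), you can check the mount from inside the container; a quick sketch using the pod name from the manifest:

# The volume should show up as a fuse.glusterfs mount at /mnt/glusterfs
kubectl exec glusterfs -- mount | grep glusterfs
kubectl exec glusterfs -- df -h /mnt/glusterfs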

 

4. Create the PV (PersistentVolume)

[root@k8s-m ~]# cat glusterfs-pv.yaml 
apiVersion: v1
kind: PersistentVolume
metadata:
  name: gluster-dev-volume
spec:
  capacity:
    storage: 8Gi
  accessModes:
    - ReadWriteMany
  glusterfs:
    endpoints: "glusterfs-cluster"
    path: "gv0"
    readOnly: false

# Apply
kubectl apply -f glusterfs-pv.yaml
# Check
kubectl  get pv 
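
The new PV should report Available until a claim binds it; a quick check sketch:

# The Source section should reference the glusterfs-cluster endpoints and path gv0
kubectl describe pv gluster-dev-volume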

 

5. Create the PVC (PersistentVolumeClaim)

[root@k8s-m ~]# cat glusterfs-pvc.yaml 
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: glusterfs-nginx
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 8Gi

# Apply
kubectl apply -f glusterfs-pvc.yaml

# Check
[root@k8s-m ~]# kubectl  get pvc 
NAME              STATUS   VOLUME               CAPACITY   ACCESS MODES   STORAGECLASS   AGE
glusterfs-nginx   Bound    gluster-dev-volume   8Gi        RWX                           11m
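
Neither the PV nor the PVC sets a storageClassName here, so the claim binds to gluster-dev-volume simply by matching the access mode (RWX) and the requested 8Gi capacity; the binding details can be inspected with:

kubectl describe pvc glusterfs-nginx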

 

6. Create a Deployment to test the mounted volume

[root@k8s-m ~]# cat nginx-deployment.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-dm
spec:
  replicas: 2
  selector:
    matchLabels:
      name: nginx
  template:
    metadata: 
      labels: 
        name: nginx 
    spec: 
      containers: 
        - name: nginx 
          image: nginx:alpine 
          imagePullPolicy: IfNotPresent
          ports: 
            - containerPort: 80
          volumeMounts:
            - name: gluster-dev-volume
              mountPath: "/usr/share/nginx/html"
      volumes:
      - name: gluster-dev-volume
        persistentVolumeClaim:
          claimName: glusterfs-nginx

# Apply
kubectl apply -f nginx-deployment.yaml
# Check
[root@k8s-m ~]# kubectl  get pod 
NAME                       READY   STATUS    RESTARTS   AGE
glusterfs                  1/1     Running   0          15m
nginx-dm-8df56c754-57kpp   1/1     Running   0          12m
nginx-dm-8df56c754-kgsbf   1/1     Running   0          12m


# Exec into one of the pods and test writing to the shared volume
[root@k8s-m ~]# kubectl  exec -it  nginx-dm-8df56c754-kgsbf  -- /bin/sh
/ # ls /usr/share/nginx/html/
/ # cd  /usr/share/nginx/html/
/usr/share/nginx/html # touch 111.txt
/usr/share/nginx/html # ls
111.txt

# Check the /data brick directory on the node host
[root@node ~]# ll /data/
total 4
-rw-r--r-- 2 root root 0 Jan 10 14:17 111.txt
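
Since the Deployment mounts the same volume read-write at nginx's web root, a file written through one replica is immediately visible to (and served by) the other; a final check sketch (pod names are the ones from this cluster, and the pod IP placeholder must be filled in from kubectl get pod -o wide):

# Write an index page from one replica...
kubectl exec nginx-dm-8df56c754-kgsbf -- /bin/sh -c 'echo "hello glusterfs" > /usr/share/nginx/html/index.html'
# ...then fetch it through the other replica
kubectl get pod -o wide
curl http://<ip-of-nginx-dm-8df56c754-57kpp>/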

 

Reposted from: https://www.cnblogs.com/zhangb8042/p/10249715.html
