Prerequisites: Docker is installed and a k8s cluster is configured; earlier articles in this series cover the k8s setup in detail.
1. Create the StorageClass for dynamic storage provisioning

vi sc.yaml
# enter the following content

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: es-nfs-storage
provisioner: yixiu
parameters:
  archiveOnDelete: "true"
reclaimPolicy: Retain
allowVolumeExpansion: true
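
Apply the StorageClass and confirm it is registered. Note that the provisioner field here (yixiu) must exactly match the PROVISIONER_NAME environment variable of the NFS provisioner deployed in step 3, otherwise PVCs will stay Pending:

kubectl apply -f sc.yaml
kubectl get storageclass es-nfs-storage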

2. Role authorization and binding (RBAC)

vi rbac.yaml
# enter the following content

apiVersion: v1
kind: ServiceAccount
metadata:
  name: es-nfs-client-provisioner
  namespace: default
  
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: es-nfs-client-provisioner-runner
rules:
- apiGroups: [""]
  resources: ["persistentvolumes"]
  verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
  resources: ["persistentvolumeclaims"]
  verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create", "update", "patch"]
  
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-es-nfs-client-provisioner
subjects:
- kind: ServiceAccount
  name: es-nfs-client-provisioner
  namespace: default
roleRef:
  kind: ClusterRole
  name: es-nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
  
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-es-nfs-client-provisioner
  namespace: default
rules:
- apiGroups: [""]
  resources: ["endpoints"]
  verbs: ["get", "list", "watch", "create", "update", "patch"]
  
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-es-nfs-client-provisioner
  namespace: default
subjects:
- kind: ServiceAccount
  name: es-nfs-client-provisioner
  namespace: default
roleRef:
  kind: Role
  name: leader-locking-es-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
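
Apply the RBAC objects; the ServiceAccount es-nfs-client-provisioner created here is what the provisioner Deployment in step 3 runs as:

kubectl apply -f rbac.yaml
kubectl get serviceaccount es-nfs-client-provisioner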

3. Deploy the NFS provisioner

vi deploy.yaml
# enter the following content

apiVersion: apps/v1
kind: Deployment
metadata:
  name: es-nfs-client-provisioner
  labels:
    app: es-nfs-client-provisioner
  namespace: default
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: es-nfs-client-provisioner
  template:
    metadata:
      labels:
        app: es-nfs-client-provisioner
    spec:
      serviceAccountName: es-nfs-client-provisioner
      containers:
      - name: es-nfs-client-provisioner
        image: easzlab/nfs-subdir-external-provisioner:v4.0.1
        # image pull policy; choose based on your network speed
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - name: nfs-client-root
          mountPath: /persistentvolumes
        env:
        - name: PROVISIONER_NAME
          value: yixiu
        - name: NFS_SERVER
          value: 192.168.2.226
        - name: NFS_PATH
          value: /home/nfs/es
      volumes:
      - name: nfs-client-root
        nfs:
          server: 192.168.2.226
          path: /home/nfs/es
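
Apply it and check that the provisioner pod reaches Running; dynamic provisioning only starts working once this controller is up and the nodes can mount 192.168.2.226:/home/nfs/es (an NFS client must be installed on each node):

kubectl apply -f deploy.yaml
kubectl get pods -l app=es-nfs-client-provisioner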

4. Create the Elasticsearch Service

vi svc.yaml
# enter the following content
apiVersion: v1
kind: Service
metadata:
  name: es-cluster-svc
  namespace: default
spec:
  selector:
    app: es
  type: NodePort
  ports:
  - name: restful
    port: 9200
    targetPort: 9200
    nodePort: 32000
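
One caveat: the per-pod DNS names used in step 5 (es-cluster-0.es-cluster-svc and so on) are only created when the StatefulSet's serviceName points at a headless Service; a NodePort Service by itself does not produce them. A common pattern is to keep this NodePort Service for external access and add a headless Service for discovery. Below is a minimal sketch under that assumption, using a hypothetical name es-cluster-headless; if you adopt it, change serviceName in step 5 and the host suffix in discovery.seed_hosts accordingly:

apiVersion: v1
kind: Service
metadata:
  # hypothetical name, not part of the original manifests
  name: es-cluster-headless
  namespace: default
spec:
  # headless: creates DNS records like es-cluster-0.es-cluster-headless
  clusterIP: None
  selector:
    app: es
  ports:
  - name: internal
    port: 9300
    targetPort: 9300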

5. Deploy Elasticsearch

vi es.yaml
# enter the following content
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: es-cluster
  namespace: default 
spec:
  serviceName: es-cluster-svc
  replicas: 3
  selector:
    matchLabels:
      app: es
  template:
    metadata:
      labels: 
        app: es
    spec:
      initContainers:
      - name: increase-vm-max-map
        image: busybox:1.32
        command: ["sysctl", "-w", "vm.max_map_count=262144"]
        securityContext:
          privileged: true
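      # note: the ulimit below only affects this init container's own shell,
      # which exits immediately; it does not raise limits in es-container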
      - name: increase-fd-ulimit
        image: busybox:1.32
        command: ["sh", "-c", "ulimit -n 65536"]
        securityContext:
          privileged: true
      containers:
      - name: es-container
        image: elasticsearch:7.8.0
        ports:
        - name: restful
          containerPort: 9200
          protocol: TCP
        - name: internal
          containerPort: 9300
          protocol: TCP
        resources:
          limits:
            cpu: 1000m
          requests:
            cpu: 100m
        volumeMounts:
        - name: data
          mountPath: /usr/share/elasticsearch/data
        env:
        - name: cluster.name
          value: es-prod
        # node name, taken from metadata.name
        - name: node.name
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        # the nodes ES chooses the initial master from when bootstrapping the cluster
        - name: cluster.initial_master_nodes
          # metadata.name plus an ordinal, starting from 0
          value: "es-cluster-0,es-cluster-1,es-cluster-2"
        - name: discovery.zen.minimum_master_nodes
          value: "2"
        - name: discovery.seed_hosts
          value: "es-cluster-0.es-cluster-svc,es-cluster-1.es-cluster-svc,es-cluster-2.es-cluster-svc"
        - name: ES_JAVA_OPTS
          value: "-Xms1g -Xmx1g"
        - name: network.host
          value: "0.0.0.0"
 
  volumeClaimTemplates:
  - metadata:
      name: data
      labels:
        app: es-volume
      namespace: default
    spec:
      # the volume can be mounted read-write by a single node
      accessModes: 
      - "ReadWriteOnce"
      storageClassName: es-nfs-storage
      resources:
        requests:
          storage: 4Gi
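
Apply it and watch the PVCs bind and the pods start; once all three replicas have joined, the cluster health endpoint on the NodePort should report three nodes (replace <node-ip> with any node's address):

kubectl apply -f es.yaml
kubectl get pvc -l app=es-volume
kubectl get pods -l app=es
curl "http://<node-ip>:32000/_cluster/health?pretty"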

6. Deploy Kibana

vi kibana.yaml
# enter the following content
apiVersion: v1
kind: Service
metadata:
  name: kibana
  namespace: default
  labels:
    app: kibana
spec:
  type: NodePort
  ports:
  - port: 5601
    nodePort: 30601
    targetPort: 5601
  selector:
    app: kibana

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
  namespace: default
  labels:
    app: kibana
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kibana
  template:
    metadata:
      labels:
        app: kibana
    spec:
      containers:
      - name: kibana
        image: kibana:7.8.0
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            cpu: 1000m
          requests:
            cpu: 100m
        env:
        - name: ELASTICSEARCH_HOSTS
          value: http://es-cluster-svc:9200
        ports:
        - containerPort: 5601
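
Apply it and wait for the pod to become Ready; Kibana then listens on NodePort 30601 and reaches Elasticsearch through the cluster Service configured in ELASTICSEARCH_HOSTS:

kubectl apply -f kibana.yaml
kubectl get pods -l app=kibana
# then open http://<node-ip>:30601 in a browser
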
# Deployment commands: apply the manifests in the order they were created
kubectl apply -f sc.yaml -f rbac.yaml -f deploy.yaml -f svc.yaml -f es.yaml -f kibana.yaml