1. Download nacos-k8s on the Linux server

First install git: yum install git

git clone https://github.com/nacos-group/nacos-k8s.git
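
The manifests used in the rest of this walkthrough come from the cloned repository; at the time of writing they sit under its deploy/ directory (the exact layout may differ between versions, so treat the paths below as an assumption):

cd nacos-k8s
ls deploy
# expected subdirectories: nfs/ (rbac.yaml, deployment.yaml, class.yaml),
# mysql/ (mysql-master-nfs.yaml, mysql-slave-nfs.yaml) and nacos/ (nacos-pvc-nfs.yaml)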

2. Deploy NFS
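
The steps below assume an NFS server is already running on one of the machines and exporting /data/nfs-share. If it is not set up yet, a minimal sketch for CentOS 7 (package and service names are CentOS-specific assumptions; the export entries themselves are added to /etc/exports later in this walkthrough):

yum install -y nfs-utils rpcbind    # NFS server packages
mkdir -p /data/nfs-share            # directory that will be exported
systemctl enable rpcbind nfs-server
systemctl start rpcbind nfs-server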

   2.1 Create the RBAC roles (rbac.yaml)

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
- apiGroups: [""]
  resources: ["persistentvolumes"]
  verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
  resources: ["persistentvolumeclaims"]
  verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
  resources: ["endpoints"]
  verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
  name: nfs-client-provisioner
  namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
rules:
- apiGroups: [""]
  resources: ["endpoints"]
  verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
subjects:
- kind: ServiceAccount
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io

Apply the manifest and check the result

kubectl create -f rbac.yaml
[root@master nfs]# kubectl get role
NAME                                    AGE
leader-locking-nfs-client-provisioner   8m40s

[root@master nfs]# kubectl get clusterrole | grep nfs
nfs-client-provisioner-runner           10m

   2.2 Create the ServiceAccount and deploy the NFS-Client Provisioner

apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-client-provisioner
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nfs-client-provisioner
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
      - name: nfs-client-provisioner
        image: quay.io/external_storage/nfs-client-provisioner:latest
        volumeMounts:
        - name: nfs-client-root
          mountPath: /persistentvolumes
        env:
        - name: PROVISIONER_NAME
          value: fuseim.pri/ifs
        - name: NFS_SERVER
          value: 172.17.79.3
        - name: NFS_PATH
          value: /data/nfs-share
      volumes:
      - name: nfs-client-root
        nfs:
          server: 172.17.79.3
          path: /data/nfs-share

Apply the manifest:

kubectl apply -f deployment.yaml

Check the result:

[root@master nfs]# kubectl get pods|grep nfs
nfs-client-provisioner-594f778474-whhb5 0/1 ContainerCreating 0 12m
[root@master nfs]# kubectl describe pods nfs-client-provisioner
Name: nfs-client-provisioner-594f778474-whhb5

First error:

mount.nfs: No route to host
Warning FailedMount 100s (x5 over 10m) kubelet, node2 Unable to mount volumes for pod "nfs-client-provisioner-594f778474-whhb5_default(56aef93a-9d31-11e9-a4c4-00163e069f44)": timeout expired waiting for volumes to attach or mount for pod "default"/"nfs-client-provisioner-594f778474-whhb5". list of unmounted volumes=[nfs-client-root]. list of unattached volumes=[nfs-client-root nfs-client-provisioner-token-8dcrx]

Fix: change the NFS server IP address in deployment.yaml to the internal IP address of one of the node machines.
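
Before re-applying, it is worth checking from the node that will mount the share whether the export is visible at all; "No route to host" on an NFS mount usually means a wrong address or a firewall blocking the NFS ports. A quick check, using the node-internal server IP that appears in the later logs as an example:

showmount -e 172.19.68.8    # should list /data/nfs-share
# if this also fails, check firewalld/iptables on the NFS server,
# e.g. systemctl stop firewalld, or open ports 111 and 2049 instead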

Re-apply: kubectl apply -f deployment.yaml

Second error:

mount.nfs: access denied by server while mounting 172.19.68.8:/data/nfs-share
Warning FailedMount 23s kubelet, node2 Unable to mount volumes for pod "nfs-client-provisioner-5d6996447d-kdp7j_default(cd2c7cc7-9d33-11e9-a4c4-00163e069f44)": timeout expired waiting for volumes to attach or mount for pod "default"/"nfs-client-provisioner-5d6996447d-kdp7j". list of unmounted volumes=[nfs-client-root]. list of unattached volumes=[nfs-client-root nfs-client-provisioner-token-w5txr]
Warning FailedMount 18s kubelet, node2 (combined from similar events): MountVolume.SetUp failed for volume "nfs-client-root" : mount failed: exit status 32
Mounting command: systemd-run
Mounting arguments: --description=Kubernetes transient mount for /var/lib/kubelet/pods/cd2c7cc7-9d33-11e9-a4c4-00163e069f44/volumes/kubernetes.io~nfs/nfs-client-root --scope -- mount -t nfs 172.19.68.8:/data/nfs-share /var/lib/kubelet/pods/cd2c7cc7-9d33-11e9-a4c4-00163e069f44/volumes/kubernetes.io~nfs/nfs-client-root
Output: Running scope as unit run-12037.scope.
mount.nfs: access denied by server while mounting 172.19.68.8:/data/nfs-share

Fix: add the following to /etc/exports

vim /etc/exports
/data/nfs-share/ *(insecure,rw,async,no_root_squash)

Restart the NFS service: service nfs restart
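
Instead of a full restart, the export table can also just be reloaded; either way it is worth confirming the new options on the server:

exportfs -ra    # re-read /etc/exports without restarting the service
exportfs -v     # the share should now list rw,async,insecure,no_root_squash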

[root@master nfs]# kubectl get pods|grep nfs
nfs-client-provisioner-5d6996447d-jnsh4   1/1     Running   0          2m33s

   2.3 Create the NFS StorageClass

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
provisioner: fuseim.pri/ifs
parameters:
  archiveOnDelete: "false"

Apply and verify

kubectl apply -f class.yaml

[root@master nfs]# kubectl get pod -l app=nfs-client-provisioner
NAME                                      READY   STATUS    RESTARTS   AGE
nfs-client-provisioner-5d6996447d-jnsh4   1/1     Running   0          6m31s
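
Dynamic provisioning can also be verified end to end with a throwaway PVC before moving on (test-claim is a hypothetical name used only for this check):

kubectl get storageclass    # managed-nfs-storage should be listed

kubectl apply -f - <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-claim
spec:
  storageClassName: managed-nfs-storage
  accessModes: [ "ReadWriteMany" ]
  resources:
    requests:
      storage: 1Mi
EOF

kubectl get pvc test-claim     # should reach Bound within a few seconds
kubectl delete pvc test-claim  # clean up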

3. Deploy the database

    Deploy the master database (modify the NFS IP as needed and deploy it to node2)

apiVersion: v1
kind: ReplicationController
metadata:
  name: mysql-master
  labels:
    name: mysql-master
spec:
  replicas: 1
  selector:
    name: mysql-master
  template:
    metadata:
      labels:
        name: mysql-master
    spec:
      containers:
      - name: master
        image: nacos/nacos-mysql-master:latest
        ports:
        - containerPort: 3306
        volumeMounts:
        - name: mysql-master-data
          mountPath: /var/lib/mysql
        env:
        - name: MYSQL_ROOT_PASSWORD
          value: "root"
        - name: MYSQL_DATABASE
          value: "nacos_devtest"
        - name: MYSQL_USER
          value: "nacos"
        - name: MYSQL_PASSWORD
          value: "nacos"
        - name: MYSQL_REPLICATION_USER
          value: 'nacos_ru'
        - name: MYSQL_REPLICATION_PASSWORD
          value: 'nacos_ru'
      volumes:
      - name: mysql-master-data
        nfs:
          server: 172.17.79.3
          path: /data/mysql-master
---
apiVersion: v1
kind: Service
metadata:
  name: mysql-master
  labels:
    name: mysql-master
spec:
  ports:
  - port: 3306
    targetPort: 3306
  selector:
    name: mysql-master

Add the following to /etc/exports and restart the NFS service

/data/nfs-share/ *(insecure,rw,async,no_root_squash)
/data/mysql-master/ *(insecure,rw,async,no_root_squash)

Apply and check the result

kubectl apply -f mysql-master-nfs.yaml

[root@master mysql]# kubectl get pod | grep mysql
mysql-master-7s86c   1/1   Running   0   4m10s

   Deploy the slave database

apiVersion: v1
kind: ReplicationController
metadata:
  name: mysql-slave
  labels:
    name: mysql-slave
spec:
  replicas: 1
  selector:
    name: mysql-slave
  template:
    metadata:
      labels:
        name: mysql-slave
    spec:
      containers:
      - name: slave
        image: nacos/nacos-mysql-slave:latest
        ports:
        - containerPort: 3306
        volumeMounts:
        - name: mysql-slave-data
          mountPath: /var/lib/mysql
        env:
        - name: MYSQL_ROOT_PASSWORD
          value: "root"
        - name: MYSQL_REPLICATION_USER
          value: 'nacos_ru'
        - name: MYSQL_REPLICATION_PASSWORD
          value: 'nacos_ru'
      volumes:
      - name: mysql-slave-data
        nfs:
          server: 172.19.68.10
          path: /data/mysql-slave
---
apiVersion: v1
kind: Service
metadata:
  name: mysql-slave
  labels:
    name: mysql-slave
spec:
  ports:
  - port: 3306
    targetPort: 3306
  selector:
    name: mysql-slave

   Deploying the slave on a node is basically the same as deploying the master, so the details are not repeated here.

   Check the result

[root@master mysql]# kubectl get pods|grep mysql
mysql-master-7s86c                        1/1     Running   0          8m32s
mysql-slave-bxh4r                         1/1     Running   0          68s
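
A quick way to confirm that replication is actually running (the pod name is taken from the output above, the root password "root" comes from the manifests, and the check assumes the mysql client is available inside the image):

kubectl exec -it mysql-slave-bxh4r -- \
  mysql -uroot -proot -e "SHOW SLAVE STATUS\G" | grep -E "Slave_(IO|SQL)_Running"
# both Slave_IO_Running and Slave_SQL_Running should report Yes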

4. Deploy Nacos

---
apiVersion: v1
kind: Service
metadata:
  name: nacos-headless
  labels:
    app: nacos
  annotations:
    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
  ports:
    - port: 8848
      name: server
      targetPort: 8848
  clusterIP: None
  selector:
    app: nacos
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: nacos-cm
data:
  mysql.master.db.name: "nacos_devtest"
  mysql.master.port: "3306"
  mysql.slave.port: "3306"
  mysql.master.user: "nacos"
  mysql.master.password: "nacos"
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: nacos
spec:
  serviceName: nacos-headless
  replicas: 2
  template:
    metadata:
      labels:
        app: nacos
      annotations:
        pod.alpha.kubernetes.io/initialized: "true"
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: "app"
                    operator: In
                    values:
                      - nacos
              topologyKey: "kubernetes.io/hostname"
      serviceAccountName: nfs-client-provisioner
      initContainers:
        - name: peer-finder-plugin-install
          image: nacos/nacos-peer-finder-plugin:latest
          imagePullPolicy: Always
          volumeMounts:
            - mountPath: "/home/nacos/plugins/peer-finder"
              name: plugindir
      containers:
        - name: nacos
          imagePullPolicy: Always
          image: nacos/nacos-server:latest
          resources:
            requests:
              memory: "2Gi"
              cpu: "500m"
          ports:
            - containerPort: 8848
              name: client-port
          env:
            - name: NACOS_REPLICAS
              value: "3"
            - name: SERVICE_NAME
              value: "nacos-headless"
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: MYSQL_MASTER_SERVICE_DB_NAME
              valueFrom:
                configMapKeyRef:
                  name: nacos-cm
                  key: mysql.master.db.name
            - name: MYSQL_MASTER_SERVICE_PORT
              valueFrom:
                configMapKeyRef:
                  name: nacos-cm
                  key: mysql.master.port
            - name: MYSQL_SLAVE_SERVICE_PORT
              valueFrom:
                configMapKeyRef:
                  name: nacos-cm
                  key: mysql.slave.port
            - name: MYSQL_MASTER_SERVICE_USER
              valueFrom:
                configMapKeyRef:
                  name: nacos-cm
                  key: mysql.master.user
            - name: MYSQL_MASTER_SERVICE_PASSWORD
              valueFrom:
                configMapKeyRef:
                  name: nacos-cm
                  key: mysql.master.password
            - name: NACOS_SERVER_PORT
              value: "8848"
            - name: PREFER_HOST_MODE
              value: "hostname"
          readinessProbe:
            httpGet:
              port: client-port
              path: /nacos/v1/console/health/readiness
            initialDelaySeconds: 60
            timeoutSeconds: 3
          livenessProbe:
            httpGet:
              port: client-port
              path: /nacos/v1/console/health/liveness
            initialDelaySeconds: 60
            timeoutSeconds: 3
          volumeMounts:
            - name: plugindir
              mountPath: /home/nacos/plugins/peer-finder
            - name: datadir
              mountPath: /home/nacos/data
            - name: logdir
              mountPath: /home/nacos/logs
  volumeClaimTemplates:
    - metadata:
        name: plugindir
        annotations:
          volume.beta.kubernetes.io/storage-class: "managed-nfs-storage"
      spec:
        accessModes: [ "ReadWriteMany" ]
        resources:
          requests:
            storage: 5Gi
    - metadata:
        name: datadir
        annotations:
          volume.beta.kubernetes.io/storage-class: "managed-nfs-storage"
      spec:
        accessModes: [ "ReadWriteMany" ]
        resources:
          requests:
            storage: 5Gi
    - metadata:
        name: logdir
        annotations:
          volume.beta.kubernetes.io/storage-class: "managed-nfs-storage"
      spec:
        accessModes: [ "ReadWriteMany" ]
        resources:
          requests:
            storage: 5Gi
  selector:
    matchLabels:
      app: nacos

Apply it directly

kubectl apply -f nacos-pvc-nfs.yaml

Check with kubectl

[root@master etc]# kubectl get pods|grep nacos
nacos-0                                   1/1     Running   0          13m
nacos-1                                   1/1     Running   0          8m44s
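
Two quick sanity checks once both pods are Running: the peer list that the peer-finder init container writes into cluster.conf, and the MySQL connection settings. Note that the manifest never sets a MySQL host explicitly; because the mysql-master Service is in the same namespace, Kubernetes injects MYSQL_MASTER_SERVICE_HOST into the pod environment, which the image is assumed to pick up (the cluster.conf path below is from the Nacos 1.x image layout and may differ in other versions):

kubectl exec -it nacos-0 -- cat /home/nacos/conf/cluster.conf       # should list both peers
kubectl exec -it nacos-0 -- env | grep MYSQL_MASTER_SERVICE         # host/port injected by the mysql-master Service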

At this point the basic deployment is complete.

Since the Nacos console needs to be reachable from a browser:

(1) Add the following Service to the manifest. (In my environment the console still could not be reached this way, so I abandoned this approach for the Ingress below.)

---
# Expose the nacos dashboard outside the cluster
kind: Service
apiVersion: v1
metadata:
  name: nacos-dashboard
  labels:
    app: nacos
spec:
  type: NodePort
  ports:
  - port: 8848
    targetPort: 8848
  selector:
    app: nacos

Run kubectl apply -f nacos-pvc-nfs.yaml again.

Check the result

[root@master nacos]# kubectl get svc|grep nacos
nacos-headless   NodePort    10.105.238.82   <none>        8848:30196/TCP   36s
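
With a NodePort in place, the console would normally be reachable at http://<node-ip>:<nodePort>/nacos (30196 in the output above). A quick check from one of the nodes, assuming that port:

curl -I http://127.0.0.1:30196/nacos/index.html    # expect HTTP 200 once the console is up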

(2) Via an Ingress
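
A minimal sketch of what such an Ingress could look like (assumes an NGINX ingress controller is installed; nacos.example.com and nacos-ingress are placeholders):

kubectl apply -f - <<EOF
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: nacos-ingress
spec:
  ingressClassName: nginx
  rules:
  - host: nacos.example.com
    http:
      paths:
      - path: /nacos
        pathType: Prefix
        backend:
          service:
            name: nacos-headless
            port:
              number: 8848
EOF

The console would then be served at http://nacos.example.com/nacos through the ingress controller.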

 

Reposted from: https://www.cnblogs.com/mutong1228/p/11124590.html
