k8s & Docker Installation

# Set SELinux to permissive mode (effectively disabling it)
sudo setenforce 0
sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
#Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
# Disable swap
swapoff -a
sed -i 's/.*swap.*/#&/' /etc/fstab
# Configure kernel parameters:
cat <<EOF >/etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness=0
EOF
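#The net.bridge.* settings above only take effect once the br_netfilter module is loaded; a minimal follow-up (standard commands, added here as a sketch) to load it and apply the new config:
modprobe br_netfilter
sysctl --system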

#For an offline installation, the deployment packages can be downloaded with yum reinstall --downloadonly --downloaddir=~
#wget -O /etc/yum.repos.d/docker-ce.repo https://repo.huaweicloud.com/docker-ce/linux/centos/docker-ce.repo
#sudo sed -i 's+download.docker.com+repo.huaweicloud.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo
wget -O /etc/yum.repos.d/aliyun-docker-ce.repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sudo yum makecache fast

sudo yum remove docker docker-common docker-selinux docker-engine
sudo yum install -y yum-utils device-mapper-persistent-data lvm2

#Install Docker. Before installing, the Docker versions available in the yum repos can be listed with yum list docker-ce --showduplicates | sort -r
sudo yum install -y  docker-ce
sudo systemctl enable docker 
sudo systemctl start docker
sudo systemctl status docker
docker --version
sudo journalctl -xeu docker

#Cgroup Driver configuration (write the file rather than append, so a pre-existing daemon.json doesn't end up as invalid JSON)
cat <<EOF > /etc/docker/daemon.json
{
"exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
sudo systemctl restart docker
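#To confirm the change took effect, docker info reports the active cgroup driver:
docker info | grep -i 'cgroup driver'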

#Configure a Docker registry mirror; DaoCloud is used here, and Aliyun or another mirror can be substituted as needed
curl -sSL https://get.daocloud.io/daotools/set_mirror.sh | sh -s http://f1361db2.m.daocloud.io
#After the script runs, /etc/docker/daemon.json looks like this:
{
	"registry-mirrors": ["http://xxxxxx.m.daocloud.io"],
	"exec-opts": ["native.cgroupdriver=systemd"]
}

#Stop all containers
docker stop $(docker ps -q)
#Remove all containers
docker rm $(docker ps -aq)
#Remove all images
docker rmi $(docker images -q)
#Prerequisites for enabling IPVS in kube-proxy
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
#Load the modules
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
#Install the ipset package
sudo yum install ipset -y
#Install the ipvsadm management tool
sudo yum install ipvsadm -y
---------------------------------------------------------------------------------------------------
#Add the Kubernetes yum repo
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

#If installation fails with "signature could not be verified for kubernetes", the gpgcheck settings above are the cause: a value of 1 enables verification; set gpgcheck and repo_gpgcheck to 0 to skip it
sudo yum list kubelet  --showduplicates |sort -r
sudo yum install kubelet-1.16.3-0 kubeadm-1.16.3-0 kubectl-1.16.3-0 -y

sudo systemctl enable --now kubelet
---------------------------------------------------------------------------------------------------
#List the images kubeadm needs to pull
kubeadm config images list
# Initialize the master node
kubeadm init --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.16.3 --apiserver-advertise-address 10.0.20.1 --pod-network-cidr=10.244.0.0/16 --token-ttl 0
#Configure kubectl
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
#The master init command prints a cluster join command containing a token; if it is lost, it can be regenerated with:
kubeadm token create --print-join-command 
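#The printed join command has this shape (token and hash values here are illustrative placeholders):
#kubeadm join 10.0.20.1:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>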
#Flannel network configuration
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
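#Verify the flannel pods come up (the master-branch manifest of this era deploys a DaemonSet labeled app=flannel into kube-system; newer manifests use a dedicated kube-flannel namespace):
kubectl get pods -n kube-system -l app=flannel -o wide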
#Assign the worker role label
kubectl get no -o wide
kubectl get no -o wide --show-labels
kubectl label nodes node02 node-role.kubernetes.io/worker=worker

NFS Deployment

yum -y install nfs-utils rpcbind
mkdir -p /data/k8s
chmod 755 /data/k8s
vim /etc/exports
/data/k8s  10.0.0.0/8(rw,sync,no_root_squash)
systemctl start rpcbind.service
systemctl start nfs.service
journalctl -xlu nfs
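#Re-export and verify the share after editing /etc/exports (standard nfs-utils commands, added as a quick check):
exportfs -r
showmount -e localhost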

#Install the NFS client on nodes that will access the share
yum -y install nfs-utils
systemctl start nfs && systemctl enable nfs
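#Optional smoke test from a client node, assuming 10.0.20.1 is the NFS server (the address used by the Elasticsearch PVs below):
mount -t nfs 10.0.20.1:/data/k8s /mnt && umount /mnt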

KubeSphere Dashboard Deployment

##Set the region
#export KKZONE=cn
##Download KubeKey
#curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh -
##Make it executable
#chmod +x kk
##Deploy
#./kk create cluster [--with-kubernetes version] [--with-kubesphere version]
##KubeSphere only
#./kk create cluster --with-kubesphere v3.2.1
##To install Kubernetes and KubeSphere at the same time
##./kk create cluster --with-kubernetes v1.21.5 --with-kubesphere v3.2.1
##Check the installation result
#kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f


#NFS directory for KubeSphere
mkdir -p /data/k8s/KubeSphere

#Run the following commands to start the installation
kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml
kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml
#Check the installation logs
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
#Use kubectl get pod --all-namespaces to check whether all Pods in the KubeSphere-related namespaces are running normally. If they are, check the console port (30880 by default) with:
kubectl get svc/ks-console -n kubesphere-system
#Access the web console through the NodePort (IP:30880) with the default account and password (admin/P@88w0rd).

Elasticsearch NFS Deployment

#Create the namespace
cat <<EOF > elastic.namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: es
EOF
---------------------------------------------------------------------------------------------------
#pv
cat <<EOF > elasticsearch-pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: k8s-pv-elasticsearch01
  namespace: es
  labels:
    app: elasticsearch
  annotations:
    volume.beta.kubernetes.io/storage-class: "elasticsearch"
spec:
  capacity:
    storage: 20Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 10.0.20.1
    path: "/data/k8s/elasticsearch/data1"
  persistentVolumeReclaimPolicy: Recycle
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: k8s-pv-elasticsearch02
  namespace: es
  labels:
    app: elasticsearch
  annotations:
    volume.beta.kubernetes.io/storage-class: "elasticsearch"
spec:
  capacity:
    storage: 20Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 10.0.20.1
    path: "/data/k8s/elasticsearch/data2"
  persistentVolumeReclaimPolicy: Recycle
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: k8s-pv-elasticsearch03
  namespace: es
  labels:
    app: elasticsearch
  annotations:
    volume.beta.kubernetes.io/storage-class: "elasticsearch"
spec:
  capacity:
    storage: 20Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 10.0.20.1
    path: "/data/k8s/elasticsearch/data3"
  persistentVolumeReclaimPolicy: Recycle
EOF
---------------------------------------------------------------------------------------------------
#svc
cat <<EOF > elasticsearch-svc.yaml
kind: Service
apiVersion: v1
metadata:
  name: elasticsearch
  namespace: es
  labels:
    app: elasticsearch
spec:
  selector:
    app: elasticsearch
  clusterIP: None
  ports:
    - port: 9200
      name: rest
    - port: 9300
      name: inter-node
EOF
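#Because clusterIP is None, this is a headless Service: each StatefulSet pod gets a stable DNS name of the form <pod>.elasticsearch.es.svc.cluster.local. A quick lookup from a throwaway pod (busybox image chosen here for illustration):
kubectl run -it --rm dns-test --image=busybox:1.35.0 --restart=Never -- nslookup elasticsearch.es.svc.cluster.local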
---------------------------------------------------------------------------------------------------
cat <<EOF > elasticsearch-statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: es
  namespace: es
spec:
  serviceName: elasticsearch
  replicas: 3
  selector:
    matchLabels:
      app: elasticsearch
  template:
    metadata:
      labels: 
        app: elasticsearch
    spec:
      initContainers:
      - name: increase-vm-max-map
        image: busybox:1.35.0
        imagePullPolicy: IfNotPresent
        command: ["sysctl", "-w", "vm.max_map_count=262144"]
        securityContext:
          privileged: true
      - name: increase-fd-ulimit
        image: busybox:1.35.0
        imagePullPolicy: IfNotPresent
        command: ["sh", "-c", "ulimit -n 65536"]
        securityContext:
          privileged: true
      containers:
      - name: elasticsearch
        image: elasticsearch:7.16.2
        imagePullPolicy: IfNotPresent
        ports:
        - name: rest
          containerPort: 9200
        - name: inter
          containerPort: 9300
        resources:
          limits:
            cpu: 1000m
          requests:
            cpu: 1000m
        volumeMounts:
        - name: data
          mountPath: /usr/share/elasticsearch/data
        env:
        - name: cluster.name
          value: k8s-logs
        - name: node.name
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: cluster.initial_master_nodes
          value: "es-0,es-1,es-2"
        - name: discovery.zen.minimum_master_nodes
          value: "2"
        - name: discovery.seed_hosts
          value: "elasticsearch"
        - name: ES_JAVA_OPTS
          value: "-Xms32768m -Xmx32768m"
        - name: network.host
          value: "0.0.0.0"
        - name: number_of_replicas
          value: "2"
  volumeClaimTemplates:
    - metadata:
        name: data
        labels:
          app: elasticsearch
        annotations:
          volume.beta.kubernetes.io/storage-class: "elasticsearch"
      spec:
        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            storage: 20Gi
EOF
---------------------------------------------------------------------------------------------------
#For remote access, forward local port 9200 to the corresponding port on an Elasticsearch pod such as es-0 (note the namespace is es, matching the manifests above)
kubectl port-forward es-0 9200:9200 --namespace=es
#Test
curl http://localhost:9200/
curl http://localhost:9200/_cluster/state?pretty
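#Cluster health is another quick check (standard Elasticsearch REST endpoint; expect status green once all three pods have joined):
curl http://localhost:9200/_cluster/health?pretty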
#Inspect pod details and events
kubectl describe pod es-0 -n es


#On the NFS server, create the data directories referenced by the PVs and open up their permissions
mkdir -p /data/k8s/elasticsearch/{data1,data2,data3}
chmod -R 777 /data/k8s/elasticsearch/*
---------------------------------------------------------------------------------------------------

kubectl apply -f elastic.namespace.yaml
kubectl apply -f elasticsearch-pv.yaml
kubectl create -f elasticsearch-statefulset.yaml
kubectl create -f elasticsearch-svc.yaml

kubectl delete -f elasticsearch-svc.yaml
kubectl delete -f elasticsearch-statefulset.yaml
kubectl delete -f elasticsearch-pv.yaml
kubectl delete -f elastic.namespace.yaml

ZooKeeper & Kafka NFS Deployment

#Create the namespace
cat <<EOF > zookeeper.namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: zk-kafka
  labels:
    name: zk-kafka
EOF
kubectl apply -f zookeeper.namespace.yaml

---------------------------------------------------------------------------------------------------
cat <<EOF > zookeeper-pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: k8s-pv-zk01
  namespace: zk-kafka
  labels:
    app: zk
  annotations:
    volume.beta.kubernetes.io/storage-class: "anything"
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 10.0.21.1
    path: "/data/k8s/zk/data1"
  persistentVolumeReclaimPolicy: Recycle
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: k8s-pv-zk02
  namespace: zk-kafka
  labels:
    app: zk
  annotations:
    volume.beta.kubernetes.io/storage-class: "anything"
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 10.0.21.1
    path: "/data/k8s/zk/data2"
  persistentVolumeReclaimPolicy: Recycle
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: k8s-pv-zk03
  namespace: zk-kafka
  labels:
    app: zk
  annotations:
    volume.beta.kubernetes.io/storage-class: "anything"
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 10.0.21.1
    path: "/data/k8s/zk/data3"
  persistentVolumeReclaimPolicy: Recycle
EOF
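#On the NFS server, create the directories these PVs point at (mirrors the kafka mkdir step further down; permissions opened up as in the Elasticsearch section):
mkdir -p /data/k8s/zk/{data1,data2,data3}
chmod -R 777 /data/k8s/zk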

---------------------------------------------------------------------------------------------------
cat <<EOF > zookeeper.yaml
apiVersion: v1
kind: Service
metadata:
  name: zk-hs
  namespace: zk-kafka
  labels:
    app: zk
spec:
  selector:
    app: zk
  clusterIP: None
  ports:
    - name: server
      port: 2888
    - name: leader-election
      port: 3888
---
apiVersion: v1
kind: Service
metadata:
  name: zk-cs
  namespace: zk-kafka
  labels:
    app: zk
spec:
  selector:
    app: zk
  type: NodePort
  ports:
    - name: client
      port: 2181
      nodePort: 31811
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zk
  namespace: zk-kafka
spec:
  serviceName: "zk-hs"
  replicas: 3 # by default is 1
  selector:
    matchLabels:
      app: zk # has to match .spec.template.metadata.labels
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: Parallel
  template:
    metadata:
      labels:
        app: zk # has to match .spec.selector.matchLabels
    spec:
      containers:
        - name: zk
          imagePullPolicy: Always
          image: guglecontainers/kubernetes-zookeeper:1.0-3.4.10
          ports:
            - containerPort: 2181
              name: client
            - containerPort: 2888
              name: server
            - containerPort: 3888
              name: leader-election
          command:
            - sh
            - -c
            - "start-zookeeper \
        --servers=3 \
        --data_dir=/var/lib/zookeeper/data \
        --data_log_dir=/var/lib/zookeeper/data/log \
        --conf_dir=/opt/zookeeper/conf \
        --client_port=2181 \
        --election_port=3888 \
        --server_port=2888 \
        --tick_time=2000 \
        --init_limit=10 \
        --sync_limit=5 \
        --heap=4G \
        --max_client_cnxns=60 \
        --snap_retain_count=3 \
        --purge_interval=12 \
        --max_session_timeout=40000 \
        --min_session_timeout=4000 \
        --log_level=INFO"
          readinessProbe:
            exec:
              command:
                - sh
                - -c
                - "zookeeper-ready 2181"
            initialDelaySeconds: 10
            timeoutSeconds: 5
          livenessProbe:
            exec:
              command:
                - sh
                - -c
                - "zookeeper-ready 2181"
            initialDelaySeconds: 10
            timeoutSeconds: 5
          volumeMounts:
            - name: datadir
              mountPath: /var/lib/zookeeper
  volumeClaimTemplates:
    - metadata:
        name: datadir
        annotations:
          volume.beta.kubernetes.io/storage-class: "anything"
      spec:
        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            storage: 1Gi
#apiVersion: v1
#kind: Service
#metadata:
#  name: zk-hs
#  labels:
#    app: zk
#spec:
#  ports:
#  - port: 2888
#    name: server
#  - port: 3888
#    name: leader-election
#  clusterIP: None
#  selector:
#    app: zk
#---
#apiVersion: v1
#kind: Service
#metadata:
#  name: zk-cs
#  labels:
#    app: zk
#spec:
#  ports:
#  - port: 2181
#    name: client
#  selector:
#    app: zk
#---
#apiVersion: policy/v1beta1
#kind: PodDisruptionBudget
#metadata:
#  name: zk-pdb
#spec:
#  selector:
#    matchLabels:
#      app: zk
#  maxUnavailable: 1
#---
#apiVersion: apps/v1
#kind: StatefulSet
#metadata:
#  name: zk
#spec:
#  selector:
#    matchLabels:
#      app: zk
#  serviceName: zk-hs
#  replicas: 3
#  updateStrategy:
#    type: RollingUpdate
#  podManagementPolicy: OrderedReady
#  template:
#    metadata:
#      labels:
#        app: zk
#    spec:
#      affinity:
#        podAntiAffinity:
#          requiredDuringSchedulingIgnoredDuringExecution:
#            - labelSelector:
#                matchExpressions:
#                  - key: "app"
#                    operator: In
#                    values:
#                    - zk
#              topologyKey: "kubernetes.io/hostname"
#      containers:
#      - name: kubernetes-zookeeper
#        #imagePullPolicy: Always
#        imagePullPolicy: IfNotPresent
#        image: "registry.k8s.com/test/zookeeper:1.0-3.4.10"
#        resources:
#          requests:
#            memory: "1Gi"
#            cpu: "0.5"
#        ports:
#        - containerPort: 2181
#          name: client
#        - containerPort: 2888
#          name: server
#        - containerPort: 3888
#          name: leader-election
#        command:
#        - sh
#        - -c
#        - "start-zookeeper \
#          --servers=3 \
#          --data_dir=/var/lib/zookeeper/data \
#          --data_log_dir=/var/lib/zookeeper/data/log \
#          --conf_dir=/opt/zookeeper/conf \
#          --client_port=2181 \
#          --election_port=3888 \
#          --server_port=2888 \
#          --tick_time=2000 \
#          --init_limit=10 \
#          --sync_limit=5 \
#          --heap=512M \
#          --max_client_cnxns=60 \
#          --snap_retain_count=3 \
#          --purge_interval=12 \
#          --max_session_timeout=40000 \
#          --min_session_timeout=4000 \
#          --log_level=INFO"
#        readinessProbe:
#          exec:
#            command:
#            - sh
#            - -c
#            - "zookeeper-ready 2181"
#          initialDelaySeconds: 10
#          timeoutSeconds: 5
#        livenessProbe:
#          exec:
#            command:
#            - sh
#            - -c
#            - "zookeeper-ready 2181"
#          initialDelaySeconds: 10
#          timeoutSeconds: 5
#        volumeMounts:
#        - name: datadir
#          mountPath: /var/lib/zookeeper
#      securityContext:
#        # runAsUser: 1000
#        fsGroup: 1000
#  volumeClaimTemplates:
#  - metadata:
#      name: datadir
#    spec:
#      accessModes: [ "ReadWriteOnce" ]
#      resources:
#        requests:
#          storage: 5Gi
EOF
---------------------------------------------------------------------------------------------------
kubectl apply -f zookeeper-pv.yaml
kubectl apply -f zookeeper.yaml
kubectl get pods
kubectl get service
kubectl describe pv k8s-pv-zk01
#kubectl patch pv k8s-pv-zk01 -p '{"metadata":{"finalizers":null}}'
kubectl describe pod zk-0 -n zk-kafka
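#Smoke test: write a znode on one member and read it back on another (zkCli.sh is on PATH in the kubernetes-zookeeper image, per the upstream StatefulSet tutorial this image comes from):
kubectl exec zk-0 -n zk-kafka -- zkCli.sh create /hello world
kubectl exec zk-1 -n zk-kafka -- zkCli.sh get /hello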
---------------------------------------------------------------------------------------------------
cat <<EOF > kafka.yaml
apiVersion: v1
kind: Service
metadata:
  name: kafka-service-1
  namespace: zk-kafka
  labels:
    app: kafka-service-1
spec:
  type: NodePort
  ports:
    - port: 9092
      name: kafka-service-1
      targetPort: 9092
      nodePort: 30901
      protocol: TCP
  selector:
    app: kafka-1
---
apiVersion: v1
kind: Service
metadata:
  name: kafka-service-2
  namespace: zk-kafka
  labels:
    app: kafka-service-2
spec:
  type: NodePort
  ports:
    - port: 9092
      name: kafka-service-2
      targetPort: 9092
      nodePort: 30902
      protocol: TCP
  selector:
    app: kafka-2
---
apiVersion: v1
kind: Service
metadata:
  name: kafka-service-3
  namespace: zk-kafka
  labels:
    app: kafka-service-3
spec:
  type: NodePort
  ports:
    - port: 9092
      name: kafka-service-3
      targetPort: 9092
      nodePort: 30903
      protocol: TCP
  selector:
    app: kafka-3
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kafka-deployment-1
  namespace: zk-kafka
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kafka-1
  template:
    metadata:
      labels:
        app: kafka-1
    spec:
      containers:
        - name: kafka-1
          image: wurstmeister/kafka:2.12-2.4.1
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 9092
          env:
            - name: KAFKA_ZOOKEEPER_CONNECT
              value: zk-0.zk-hs.zk-kafka.svc.cluster.local:2181,zk-1.zk-hs.zk-kafka.svc.cluster.local:2181,zk-2.zk-hs.zk-kafka.svc.cluster.local:2181
            - name: KAFKA_HEAP_OPTS
              value: "-Xms4096m -Xmx4096m"
            - name: KAFKA_BROKER_ID
              value: "1"
            - name: KAFKA_CREATE_TOPICS
              value: mytopic:2:1
            - name: KAFKA_LISTENERS
              value: PLAINTEXT://0.0.0.0:9092
            - name: KAFKA_ADVERTISED_PORT
              value: "30901"
            - name: KAFKA_ADVERTISED_HOST_NAME
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
          volumeMounts:
            - name: datadir
              mountPath: /var/lib/kafka
      volumes:
        - name: datadir
          nfs:
            server: 10.0.21.1
            path: "/data/k8s/kafka/pv1"
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kafka-deployment-2
  namespace: zk-kafka
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kafka-2
  template:
    metadata:
      labels:
        app: kafka-2
    spec:
      containers:
        - name: kafka-2
          image: wurstmeister/kafka:2.12-2.4.1
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 9092
          env:
            - name: KAFKA_ZOOKEEPER_CONNECT
              value: zk-0.zk-hs.zk-kafka.svc.cluster.local:2181,zk-1.zk-hs.zk-kafka.svc.cluster.local:2181,zk-2.zk-hs.zk-kafka.svc.cluster.local:2181
            - name: KAFKA_HEAP_OPTS
              value: "-Xms4096m -Xmx4096m"
            - name: KAFKA_BROKER_ID
              value: "2"
            - name: KAFKA_LISTENERS
              value: PLAINTEXT://0.0.0.0:9092
            - name: KAFKA_ADVERTISED_PORT
              value: "30902"
            - name: KAFKA_ADVERTISED_HOST_NAME
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
          volumeMounts:
            - name: datadir
              mountPath: /var/lib/kafka
      volumes:
        - name: datadir
          nfs:
            server: 10.0.21.1
            path: "/data/k8s/kafka/pv2"
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kafka-deployment-3
  namespace: zk-kafka
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kafka-3
  template:
    metadata:
      labels:
        app: kafka-3
    spec:
      containers:
        - name: kafka-3
          image: wurstmeister/kafka:2.12-2.4.1
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 9092
          env:
            - name: KAFKA_ZOOKEEPER_CONNECT
              value: zk-0.zk-hs.zk-kafka.svc.cluster.local:2181,zk-1.zk-hs.zk-kafka.svc.cluster.local:2181,zk-2.zk-hs.zk-kafka.svc.cluster.local:2181
            - name: KAFKA_HEAP_OPTS
              value: "-Xms4096m -Xmx4096m"
            - name: KAFKA_BROKER_ID
              value: "3"
            - name: KAFKA_LISTENERS
              value: PLAINTEXT://0.0.0.0:9092
            - name: KAFKA_ADVERTISED_PORT
              value: "30903"
            - name: KAFKA_ADVERTISED_HOST_NAME
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
          volumeMounts:
            - name: datadir
              mountPath: /var/lib/kafka
      volumes:
        - name: datadir
          nfs:
            server: 10.0.21.1
            path: "/data/k8s/kafka/pv3"
EOF

---------------------------------------------------------------------------------------------------
mkdir /data/k8s/kafka/{pv1,pv2,pv3} -p
kubectl apply -f kafka.yaml
kubectl get pods
kubectl get service
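#Smoke test: list topics from inside a broker (assumes kafka-topics.sh is on PATH in the wurstmeister/kafka image; the KAFKA_CREATE_TOPICS setting above should have created mytopic):
KPOD=$(kubectl get pod -n zk-kafka -l app=kafka-1 -o jsonpath='{.items[0].metadata.name}')
kubectl exec -it $KPOD -n zk-kafka -- kafka-topics.sh --list --zookeeper zk-0.zk-hs.zk-kafka.svc.cluster.local:2181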

---------------------------------------------------------------------------------------------------

MySQL Deployment

#Create the namespace
cat <<EOF > mysql.namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: mysql
  labels:
    name: mysql
EOF

---------------------------------------------------------------------------------------------------
cat <<EOF > mysql-pv.yaml
#apiVersion: v1
#kind: PersistentVolume
#metadata:
#  name: model-db-pv
#spec:
#  storageClassName: ml-pv1
#  accessModes:
#  - ReadWriteOnce
#  capacity:
#    storage: 5Gi
#  hostPath:
#    path: /home/work/share/model-db
#  persistentVolumeReclaimPolicy: Retain
#  volumeMode: Filesystem

apiVersion: v1
kind: PersistentVolume
metadata:
  name: model-db-pv
  namespace: mysql
spec:
  storageClassName: ml-pv1
  accessModes:
  - ReadWriteOnce
  capacity:
    storage: 5Gi
  persistentVolumeReclaimPolicy: Retain
  #storageClassName: nfs
  nfs:
    path: /data/k8s/mysql
    server: 10.0.21.1
  volumeMode: Filesystem
EOF
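#On the NFS server, create the directory the PV points at:
mkdir -p /data/k8s/mysql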

---------------------------------------------------------------------------------------------------
cat <<EOF > mysql-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: model-db-pv-claim
  namespace: mysql
spec:
  storageClassName: ml-pv1
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
EOF

---------------------------------------------------------------------------------------------------
cat <<EOF > mysql-configMap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: model-db-config
  namespace: mysql
  labels:
    app: model-db
data:
  my.cnf: |-
    [client]
    default-character-set=utf8mb4
    [mysql]
    default-character-set=utf8mb4
    [mysqld]
    max_connections = 2000
    secure_file_priv=/var/lib/mysql
    sql_mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION
EOF

---------------------------------------------------------------------------------------------------
cat <<EOF > mysql-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: model-db
  namespace: mysql
spec:
  replicas: 1
  selector:
    matchLabels:
      app: model-mysql
  template:
    metadata:
      labels:
        app: model-mysql
        namespace: mysql
    spec:
      containers:
      - args:
        - --datadir
        - /var/lib/mysql/datadir
        env:
          - name: MYSQL_ROOT_PASSWORD
            value: root
          - name: MYSQL_USER
            value: user
          - name: MYSQL_PASSWORD
            value: user
        image: mysql:8.0.27
        name: model-db-container
        ports:
        - containerPort: 3306
          name: dbapi
        volumeMounts:
        - mountPath: /var/lib/mysql
          name: model-db-storage
        - name: config
          mountPath: /etc/mysql/conf.d/my.cnf
          subPath: my.cnf
        # mount the localtime volume declared below (it was defined but previously never mounted)
        - name: localtime
          mountPath: /etc/localtime
      volumes:
      - name: model-db-storage
        persistentVolumeClaim:
          claimName: model-db-pv-claim
      - name: config      
        configMap:
          name: model-db-config
      - name: localtime
        hostPath:
          type: File
          path: /etc/localtime
EOF

---------------------------------------------------------------------------------------------------
cat <<EOF > mysql-svc.yaml
#ClusterIP: visible only inside the cluster
#apiVersion: v1
#kind: Service
#metadata:
#  labels:
#    app: model-mysql
#  name: model-db-svc
#  namespace: mysql
#spec:
#  type: ClusterIP
#  ports:
#  - port: 3306
#    protocol: TCP
#    targetPort: 3306
#  selector:
#    app: model-mysql

apiVersion: v1
kind: Service
metadata:
  labels:
    app: model-mysql
  name: model-db-svc
  namespace: mysql
spec:
  type: NodePort
  ports:
  - name: http
    port: 3306
    nodePort: 30336
    protocol: TCP
    targetPort: 3306
  selector:
    app: model-mysql
EOF
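#With the NodePort Service in place, MySQL is reachable from outside the cluster (replace <node-ip> with any node's address; credentials come from the Deployment env above):
mysql -h <node-ip> -P 30336 -uroot -proot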

---------------------------------------------------------------------------------------------------
kubectl apply -f mysql.namespace.yaml
kubectl apply -f mysql-pv.yaml -n mysql
kubectl get pv -n mysql
kubectl apply -f mysql-pvc.yaml -n mysql
kubectl get pvc -n mysql
kubectl apply -f mysql-configMap.yaml -n mysql
kubectl apply -f mysql-deployment.yaml -n mysql
kubectl apply -f mysql-svc.yaml -n mysql
kubectl describe pvc model-db-pv-claim -n mysql
---------------------------------------------------------------------------------------------------
kubectl get pods -n mysql
kubectl exec -it model-db-569b698fb8-qc62f -n mysql -- bash

#After creating the test_db database in MySQL, changing the environment variables, and restarting the test_db application, it failed with "Access denied for user 'root'@'172.17.0.1' (using password: NO)", yet Navicat could still connect to MySQL.
#Anyone familiar with MySQL will locate this quickly: starting with MySQL 8.0, the default authentication plugin is caching_sha2_password (see MySQL 8.0.4: New Default Authentication Plugin: caching_sha2_password).
#Option 1: set the password in the application config (spring.datasource.password: ***)
#Option 2: switch the account's authentication back to mysql_native_password
mysql -uroot -proot
USE mysql; 
ALTER USER 'root'@'%' IDENTIFIED WITH mysql_native_password BY 'root';
FLUSH PRIVILEGES;
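#Verify the plugin change took effect (standard system table query):
SELECT user, host, plugin FROM mysql.user WHERE user = 'root';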