I.  Environment planning:

Hostname   Address            Role                                                        Specs
master1    192.168.188.11     Load-balancer primary, HA MASTER node, k8s control plane   at least 2 CPU cores, 2 GB RAM
master2    192.168.188.12     Load-balancer backup, HA BACKUP node, k8s control plane    at least 2 CPU cores, 2 GB RAM
master3    192.168.188.13     Load-balancer backup, HA BACKUP node, k8s control plane    at least 2 CPU cores, 2 GB RAM
node1      192.168.188.14     k8s worker node                                            1 CPU core, 1 GB RAM
node2      192.168.188.15     k8s worker node                                            1 CPU core, 1 GB RAM
VIP        192.168.188.100    High-availability VIP address

II.  Kubernetes base environment setup (run on all five nodes):

        1.  Configure the EPEL repository:

[root@master1 ~]# wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo

        2.  Set up time synchronization:

[root@master1 ~]# yum install chrony -y

[root@master1 ~]# vim /etc/chrony.conf

server ntp.aliyun.com iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
logdir /var/log/chrony

[root@master1 ~]# systemctl enable --now chronyd

[root@master1 ~]# chronyc sources
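
To confirm that the clock is actually synchronized against ntp.aliyun.com, an optional check (not part of the original steps) is:

## "Leap status : Normal" and a small "System time" offset indicate a healthy sync
[root@master1 ~]# chronyc tracking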

        3.  Configure hosts resolution:

[root@master1 ~]# vim /etc/hosts

127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.188.11 master1
192.168.188.12 master2
192.168.188.13 master3
192.168.188.14 node1
192.168.188.15 node2
192.168.188.100 k8s.yunjisuan.com
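
Rather than editing /etc/hosts by hand on every node, the finished file can be pushed from master1; a minimal sketch, assuming the node names above (passwords are prompted until the key exchange in step 12 is done):

## Push the hosts file from master1 to the other four nodes
[root@master1 ~]# for host in master2 master3 node1 node2; do scp /etc/hosts root@$host:/etc/hosts; done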

        4.  Disable the swap partition:

[root@master1 ~]# vim /etc/fstab 
 
## Comment out the swap line in this file
#/dev/mapper/centos-swap swap                    swap    defaults        0 0
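
Commenting out the fstab entry only takes effect after a reboot; to turn swap off immediately for the current boot as well (a common companion step, not shown in the original), run:

## Disable swap for the running system; the commented fstab line keeps it off after reboot
[root@master1 ~]# swapoff -a

## Verify: the Swap line should show 0
[root@master1 ~]# free -h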

        5.  Upgrade the operating system kernel:

## Import the elrepo GPG key
[root@master1 ~]# rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
 
## Install the elrepo yum repository
[root@master1 ~]# yum -y install https://www.elrepo.org/elrepo-release-7.0-4.el7.elrepo.noarch.rpm
 
## Install kernel-ml (ml is the latest mainline kernel; lt is the long-term maintenance kernel)
[root@master1 ~]# yum --enablerepo="elrepo-kernel" -y install kernel-ml.x86_64
 
## Set the default grub2 boot entry to 0
[root@master1 ~]# grub2-set-default 0
 
## Regenerate the grub2 configuration file
[root@master1 ~]# grub2-mkconfig -o /boot/grub2/grub.cfg
 
## Reboot after the update so the upgraded kernel takes effect
[root@master1 ~]# reboot
 
## Check the running kernel again
[root@master1 ~]# uname -r
6.0.0-1.el7.elrepo.x86_64

        6.  Adjust the Linux kernel parameters:

## Adjust kernel parameters to enable bridge filtering and IP forwarding
[root@master1 ~]# vim /etc/sysctl.d/kubernetes.conf
 
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness=0
 
## Reload the configuration
[root@master1 ~]# sysctl -p /etc/sysctl.d/kubernetes.conf
 
## Load the bridge netfilter module
[root@master1 ~]# modprobe br_netfilter
 
## Make sure bridge-nf-call-iptables is set to 1
[root@master1 ~]# echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables
 
## Verify that the bridge netfilter module is loaded
[root@master1 ~]# lsmod | grep br_netfilter
br_netfilter           28672  0
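
modprobe only loads br_netfilter for the current boot; to have it loaded automatically after a reboot, a systemd modules-load drop-in can be added (an optional extra step, not in the original):

## Load br_netfilter automatically at boot via systemd-modules-load
[root@master1 ~]# echo "br_netfilter" > /etc/modules-load.d/k8s.conf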

        7.  Configure ipvs: Kubernetes services support two proxy modes, one based on iptables and one based on ipvs. Of the two, ipvs offers noticeably better performance, but using it requires loading the ipvs kernel modules manually.

## Install ipset and ipvsadm
[root@master1 ~]# yum install ipset ipvsadm -y
 
## Write the modules to be loaded into a script file
[root@master1 ~]# vim /etc/sysconfig/modules/ipvs.modules
 
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
 
## Make the script executable
[root@master1 ~]# chmod +x /etc/sysconfig/modules/ipvs.modules
 
## Run the script
[root@master1 ~]# sh /etc/sysconfig/modules/ipvs.modules
 
## Check whether the modules were loaded successfully:
[root@master1 ~]# lsmod | grep -e ip_vs -e nf_conntrack
ip_vs_sh               16384  0 
ip_vs_wrr              16384  0 
ip_vs_rr               16384  0 
ip_vs                 163840  6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack          159744  5 xt_conntrack,nf_nat,xt_nat,xt_MASQUERADE,ip_vs
nf_defrag_ipv6         24576  2 nf_conntrack,ip_vs
libcrc32c              16384  5 nf_conntrack,nf_nat,nf_tables,xfs,ip_vs

        8.  The default Kubernetes package repositories are hosted overseas and are slow, so switch to a domestic (Aliyun) mirror:

[root@master1 ~]# vim /etc/yum.repos.d/kubernetes.repo
 
[Kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
         http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg

[root@master1 ~]# yum clean all && yum makecache

        9.  Install the cluster packages:

## List all available versions
[root@master1 ~]# yum list kubeadm kubelet kubectl --showduplicates | sort -r
 
[root@master1 ~]# yum install kubelet-1.24.2 kubeadm-1.24.2 kubectl-1.24.2
 
## Check the kubeadm version
[root@master1 ~]# kubeadm version
kubeadm version: &version.Info{Major:"1", Minor:"24", GitVersion:"v1.24.2", 
GitCommit:"f66044f4361b9f1f96f0053dd46cb7dce5e990a8", GitTreeState:"clean", 
BuildDate:"2022-06-15T14:20:54Z", GoVersion:"go1.18.3", Compiler:"gc", 

        10.  Configure the kubelet cgroup driver: to keep the cgroup driver used by Docker consistent with the one used by kubelet, the following file content is recommended.

[root@master1 ~]# vim /etc/sysconfig/kubelet 
 
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"
KUBE_PROXY_MODE="ipvs"

        11.  Enable kubelet to start on boot:

[root@master1 ~]# systemctl enable kubelet.service --now

        12.  Configure passwordless SSH between the five nodes:

## Each node generates its own SSH key pair
[root@master1 ~]# ssh-keygen -f ~/.ssh/id_rsa -P '' -q

## Each node copies its public key to master1 (master1 copies its own as well)
[root@master1 ~]# ssh-copy-id master1

## Copy the authorized_keys file from master1 to the other nodes; this step only needs to be done on master1
[root@master1 ~]# scp ~/.ssh/authorized_keys root@master2:~/.ssh/

[root@master1 ~]# scp ~/.ssh/authorized_keys root@master3:~/.ssh/

[root@master1 ~]# scp ~/.ssh/authorized_keys root@node1:~/.ssh/

[root@master1 ~]# scp ~/.ssh/authorized_keys root@node2:~/.ssh/
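
Once the keys are distributed, passwordless access can be verified from master1 with a quick loop (hostnames assumed to match the table in part I):

## Each command should print the remote hostname without prompting for a password
[root@master1 ~]# for host in master1 master2 master3 node1 node2; do ssh root@$host hostname; done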

III.  Configure load balancing, using HAProxy (run on all three master nodes):

        1.  Install HAProxy:

[root@master1 ~]# yum install haproxy -y

        2.  Back up the original configuration file:

[root@master1 ~]# cp /etc/haproxy/haproxy.cfg{,.bak}

        3.  Write the HAProxy configuration file (identical on all three nodes):

## Edit the haproxy configuration file:
[root@master1 ~]# vim /etc/haproxy/haproxy.cfg

global
  log         127.0.0.1 local2
  chroot      /var/lib/haproxy
  pidfile     /var/run/haproxy.pid
  maxconn     4000
  user        haproxy
  group       haproxy
  daemon
 
  stats socket /var/lib/haproxy/stats
  ssl-default-bind-ciphers PROFILE=SYSTEM
  ssl-default-server-ciphers PROFILE=SYSTEM

defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000

frontend k8s-apiserver
    mode	tcp
    bind	*:16443  ## HAProxy listen address and port
    option	tcplog
    default_backend k8s-apiserver  ## backend section to use

listen stats
    bind	*:1080
    stats auth  admin:123456
    stats refresh 5s
    stats realm HAProxy\ Statistics
    stats uri /admin?stats

backend k8s-apiserver
    mode	tcp
    balance	roundrobin
    server master1 192.168.188.11:6443 check  ## backend address and port
    server master2 192.168.188.12:6443 check
    server master3 192.168.188.13:6443 check
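
Before starting the service, the configuration can be validated with HAProxy's built-in syntax check (an optional step):

## -c only checks the configuration; "Configuration file is valid" means it parsed cleanly
[root@master1 ~]# haproxy -c -f /etc/haproxy/haproxy.cfg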

        4.  Enable HAProxy logging:

[root@master1 ~]# vim /etc/rsyslog.conf 

$ModLoad imudp
$UDPServerRun 514
 
$ModLoad imtcp
$InputTCPServerRun 514
 
local2.*                                                /var/log/haproxy.log

[root@master1 ~]# systemctl restart rsyslog.service

IV.  Configure high availability, implemented here with Keepalived (run on all three master nodes):

        1.  Install Keepalived:

[root@master1 ~]# yum install keepalived -y

        2.  Back up the original keepalived configuration file:

[root@master1 ~]# cp /etc/keepalived/keepalived.conf{,.bak}

        3.  Write the keepalived configuration on master1:

[root@master1 ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   router_id LVS_MASTER1

   script_user root
   enable_script_security
}

vrrp_script chkHaproxy {
    #script "/etc/keepalived/check_haproxy.sh"
    script "killall -0 haproxy"
    interval 3
    fall 10
    rise 2
}

vrrp_instance VI_1 {
    state BACKUP
    nopreempt
    interface ens33
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
   
    track_script {
      chkHaproxy
    }

    virtual_ipaddress {
        192.168.188.100
    }
}

        4.  Write the keepalived configuration on master2:

[root@master2 ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   router_id LVS_MASTER2

   script_user root
   enable_script_security
}

vrrp_script chkHaproxy {
    #script "/etc/keepalived/check_haproxy.sh"
    script "killall -0 haproxy"
    interval 3
    fall 10
    rise 2
}

vrrp_instance VI_1 {
    state BACKUP
    nopreempt 
    interface ens33
    virtual_router_id 51
    priority 99
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }

    track_script {
      chkHaproxy
    }

    virtual_ipaddress {
        192.168.188.100
    }
}

        5.  Write the keepalived configuration on master3:

[root@master3 ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   router_id LVS_MASTER3

   script_user root
   enable_script_security
}

vrrp_script chkHaproxy {
    #script "/etc/keepalived/check_haproxy.sh"
    script "killall -0 haproxy"
    interval 3
    fall 10
    rise 2
}

vrrp_instance VI_1 {
    state BACKUP
    nopreempt 
    interface ens33
    virtual_router_id 51
    priority 98
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }

    track_script {
      chkHaproxy
    }

    virtual_ipaddress {
        192.168.188.100
    }
}

        6.  Alternatively, the haproxy service status can be monitored with a script (the path below matches the one referenced in the keepalived configuration):

[root@master1 ~]# vim /etc/keepalived/check_haproxy.sh
#!/bin/bash
## Count the running haproxy processes
pid=`ps -C haproxy --no-header | wc -l`
if [ $pid -eq 0 ]
then
  ## haproxy is down: try to restart it
  systemctl start haproxy
  if [ `ps -C haproxy --no-header | wc -l` -eq 0 ]
  then
    ## Restart failed: stop keepalived so the VIP fails over to another node
    systemctl stop keepalived

    echo "haproxy down" >> /tmp/haproxy_check.log
    sleep 10
  fi
fi
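
If you use the script instead of the killall check, make it executable and switch the vrrp_script block in keepalived.conf to the commented-out script line, for example:

## Make the health-check script executable so keepalived (running it as root) can invoke it
[root@master1 ~]# chmod +x /etc/keepalived/check_haproxy.sh

## In /etc/keepalived/keepalived.conf, use the script instead of killall:
##     script "/etc/keepalived/check_haproxy.sh"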

        7.  Start the haproxy and keepalived services:

[root@master1 ~]# systemctl enable --now haproxy.service 

[root@master1 ~]# systemctl enable --now keepalived.service

V.  Test whether the HAProxy + Keepalived cluster is working:

        1.  Check which host holds the VIP:

[root@master1 ~]# ip a | grep 192.168.188.100
    inet 192.168.188.100/32 scope global ens33
[root@master1 ~]# 

[root@master2 ~]# ip a | grep 192.168.188.100
[root@master2 ~]# 

[root@master3 ~]# ip a | grep 192.168.188.100
[root@master3 ~]# 

As shown above, the VIP is currently on master1.

        2.  Stop the haproxy service on master1 and check the VIP again:

[root@master1 ~]# systemctl stop haproxy.service

[root@master1 ~]# ip a | grep 192.168.188.100
[root@master1 ~]# 

[root@master2 ~]# ip a | grep 192.168.188.100
    inet 192.168.188.100/32 scope global ens33
[root@master2 ~]# 

[root@master3 ~]# ip a | grep 192.168.188.100
[root@master3 ~]#

As shown above, the VIP has failed over to master2.

        3.  Restore the haproxy service on master1 and check whether the VIP moves back:

[root@master1 ~]# systemctl start haproxy.service

[root@master1 ~]# ip a | grep 192.168.188.100
[root@master1 ~]# 

[root@master2 ~]# ip a | grep 192.168.188.100
    inet 192.168.188.100/32 scope global ens33
[root@master2 ~]# 

[root@master3 ~]# ip a | grep 192.168.188.100
[root@master3 ~]#

As shown above, the VIP does not move back, because non-preemptive mode (nopreempt) is configured.

VI.  Install Docker (run on all five nodes):

        1.  Switch to the Aliyun Docker repository:

[root@master1 ~]# wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo

        2.  Install the latest Docker version:

[root@master1 ~]# yum install docker-ce -y

        3.  Configure the Aliyun registry mirror:

[root@master1 ~]# mkdir /etc/docker
 
[root@master1 ~]# vim /etc/docker/daemon.json
 
{
  "registry-mirrors": ["https://t2alg15i.mirror.aliyuncs.com"]
}
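
Since kubelet was configured with --cgroup-driver=systemd in part II step 10, Docker should use the same cgroup driver for consistency; a sketch of a daemon.json that adds this alongside the registry mirror (the exec-opts line is an addition, not part of the original):

{
  "registry-mirrors": ["https://t2alg15i.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}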

        4.  Start Docker:

[root@master1 ~]# systemctl restart docker

[root@master1 ~]# systemctl enable docker

        5.  Check the Docker status and version:

[root@master1 ~]# docker version 

Client: Docker Engine - Community
 Version:           20.10.21
 API version:       1.41
 Go version:        go1.18.7
 Git commit:        baeda1f
 Built:             Tue Oct 25 18:04:24 2022
 OS/Arch:           linux/amd64
 Context:           default
 Experimental:      true

Server: Docker Engine - Community
 Engine:
  Version:          20.10.21
  API version:      1.41 (minimum version 1.12)
  Go version:       go1.18.7
  Git commit:       3056208
  Built:            Tue Oct 25 18:02:38 2022
  OS/Arch:          linux/amd64
  Experimental:     false
 containerd:
  Version:          1.6.10
  GitCommit:        770bd0108c32f3fb5c73ae1264f7e503fe7b2661
 runc:
  Version:          1.1.4
  GitCommit:        v1.1.4-0-g5fd4c4d
 docker-init:
  Version:          0.19.0
  GitCommit:        de40ad0

        6.  Install cri-dockerd: Kubernetes removed dockershim support in v1.24, and Docker Engine does not implement the CRI specification by default, so the two can no longer integrate directly. To bridge this gap, Mirantis and Docker jointly created the cri-dockerd project, a shim that exposes a CRI-compatible interface for Docker Engine so that Kubernetes can manage Docker through the CRI.

[root@master1 ~]# wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.2.3/cri-dockerd-0.2.3-3.el7.x86_64.rpm

[root@master1 ~]# yum localinstall cri-dockerd-0.2.3-3.el7.x86_64.rpm

        7.  Modify the cri-dockerd systemd unit:

[root@master1 ~]# vim /usr/lib/systemd/system/cri-docker.service 
 
ExecStart=/usr/bin/cri-dockerd --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.7

        8.  Start cri-docker:

[root@master1 ~]# systemctl daemon-reload
 
[root@master1 ~]# systemctl start cri-docker

[root@master1 ~]# systemctl enable cri-docker

[root@master1 ~]# systemctl status cri-docker

VII.  Configure the Kubernetes cluster:

        1.  Initialize the cluster (run only on master1, since master1 is the initial leader of the cluster):

## 6443 is the apiserver port; 192.168.188.100:16443 is the VIP plus the HAProxy frontend port
[root@master1 ~]# kubeadm init --kubernetes-version=v1.24.2 \
--pod-network-cidr=10.224.0.0/16 \
--apiserver-advertise-address=192.168.188.11 \
--apiserver-bind-port=6443 \
--cri-socket unix:///var/run/cri-dockerd.sock \
--image-repository registry.aliyuncs.com/google_containers \
--service-cidr=10.96.0.0/12 \
--control-plane-endpoint=192.168.188.100:16443

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:

  ## Run the following command to add another master (control-plane) node to the cluster
  kubeadm join 192.168.188.100:16443 --token qjppst.s64b8wn7swxvk8ph \
	--discovery-token-ca-cert-hash sha256:23936c5f3ff95023753b8fc917ed5820be9ff51b0dc1a8787cdafd51e10c1dd0 \
	--control-plane 

Then you can join any number of worker nodes by running the following on each as root:

## Run the following command to add a worker node to the cluster
kubeadm join 192.168.188.100:16443 --token qjppst.s64b8wn7swxvk8ph \
	--discovery-token-ca-cert-hash sha256:23936c5f3ff95023753b8fc917ed5820be9ff51b0dc1a8787cdafd51e10c1dd0 

       Explanation of the initialization parameters:

Parameter                                         Meaning
--apiserver-advertise-address=192.168.188.11      IP address of this master host
--image-repository                                Registry to pull images from; the Aliyun mirror is used here
--kubernetes-version=v1.24.2                      Kubernetes version to install
--service-cidr=10.96.0.0/12 (default)             IP range for cluster Services; must not overlap with --pod-network-cidr; the default can be used
--pod-network-cidr=10.224.0.0/16 (default)        IP range for the Pod network; must not overlap with --service-cidr; the default can be used

        2.  Run the following on master1:

[root@master1 ~]# mkdir -p $HOME/.kube

[root@master1 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

[root@master1 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config

[root@master1 ~]# export KUBECONFIG=/etc/kubernetes/admin.conf

        3.  Add the other master nodes to the cluster:
                1>.  Copy the CA files from the leader node (master1) to master2 and master3:

[root@master1 ~]# scp /etc/kubernetes/pki/ca.* root@master2:/etc/kubernetes/pki/
ca.crt                                            100% 1099   322.6KB/s   00:00    
ca.key                                            100% 1675     1.1MB/s   00:00
    
[root@master1 ~]# scp /etc/kubernetes/pki/ca.* root@master3:/etc/kubernetes/pki/
ca.crt                                            100% 1099   485.6KB/s   00:00    
ca.key                                            100% 1675   778.3KB/s   00:00

[root@master1 ~]# scp /etc/kubernetes/pki/sa.* root@master2:/etc/kubernetes/pki/
sa.key                                            100% 1679     1.1MB/s   00:00    
sa.pub                                            100%  451   392.7KB/s   00:00    

[root@master1 ~]# scp /etc/kubernetes/pki/sa.* root@master3:/etc/kubernetes/pki/
sa.key                                            100% 1679     1.0MB/s   00:00    
sa.pub                                            100%  451   236.8KB/s   00:00

[root@master1 ~]# scp /etc/kubernetes/pki/front-proxy-ca.* root@master2:/etc/kubernetes/pki/
front-proxy-ca.crt                                100% 1115   476.7KB/s   00:00    
front-proxy-ca.key                                100% 1679     1.0MB/s   00:00    

[root@master1 ~]# scp /etc/kubernetes/pki/front-proxy-ca.* root@master3:/etc/kubernetes/pki/
front-proxy-ca.crt                                100% 1115   734.3KB/s   00:00    
front-proxy-ca.key                                100% 1679     1.6MB/s   00:00 

## Create the /etc/kubernetes/pki/etcd directory on master2
[root@master2 ~]# mkdir /etc/kubernetes/pki/etcd

[root@master1 ~]# scp /etc/kubernetes/pki/etcd/ca.* root@master2:/etc/kubernetes/pki/etcd/
ca.crt                                            100% 1086   924.9KB/s   00:00    
ca.key                                            100% 1679     1.4MB/s   00:00

## Create the /etc/kubernetes/pki/etcd directory on master3
[root@master3 ~]# mkdir /etc/kubernetes/pki/etcd

[root@master1 ~]# scp /etc/kubernetes/pki/etcd/ca.* root@master3:/etc/kubernetes/pki/etcd/
ca.crt                                            100% 1086   769.2KB/s   00:00    
ca.key                                            100% 1679     1.5MB/s   00:00

                2>.  Join master2 and master3 to the cluster (run the same command on both nodes):

[root@master2 ~]# kubeadm join 192.168.188.100:16443 \
--token qjppst.s64b8wn7swxvk8ph \
--discovery-token-ca-cert-hash sha256:23936c5f3ff95023753b8fc917ed5820be9ff51b0dc1a8787cdafd51e10c1dd0 \
--cri-socket unix:///var/run/cri-dockerd.sock \
--control-plane

                3>.  Run the following on master2 and master3:

[root@master2 ~]# mkdir -p $HOME/.kube
 
[root@master2 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
 
[root@master2 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config

        4.  Join the worker nodes to the cluster:

kubeadm join 192.168.188.100:16443 \
--token qjppst.s64b8wn7swxvk8ph \
--discovery-token-ca-cert-hash sha256:23936c5f3ff95023753b8fc917ed5820be9ff51b0dc1a8787cdafd51e10c1dd0 \
--cri-socket unix:///var/run/cri-dockerd.sock
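
The bootstrap token in the join command expires (by default after 24 hours); if a node is joined later, a fresh join command can be generated on master1 (a standard kubeadm feature, not shown in the original output):

## Print a new worker join command with a fresh token; append --cri-socket unix:///var/run/cri-dockerd.sock when running it
[root@master1 ~]# kubeadm token create --print-join-command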

        5.  Make the kubectl command usable on all nodes (run on every node):

[root@master1 ~]# echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile

[root@master1 ~]# source .bash_profile

        6.  Configure kubectl command completion (run on every node):

[root@master1 ~]# source /usr/share/bash-completion/bash_completion

[root@master1 ~]# source <(kubectl completion bash)

[root@master1 ~]# echo "source <(kubectl completion bash)" >> ~/.bashrc

        7.  Verify that all nodes have joined the cluster:

[root@master1 ~]# kubectl get nodes 

NAME      STATUS     ROLES           AGE     VERSION
master1   NotReady   control-plane   4h58m   v1.24.2
master2   NotReady   control-plane   4h50m   v1.24.2
master3   NotReady   control-plane   4h49m   v1.24.2
node1     NotReady   <none>          4h49m   v1.24.2
node2     NotReady   <none>          4h48m   v1.24.2

VIII.  Install the network plugin and verify that the cluster is deployed successfully.

        1.  Install the network plugin (only on the cluster's leader node):

[root@master1 ~]# wget http://down.i4t.com/k8s1.24/kube-flannel.yml
 
[root@master1 ~]# kubectl apply -f kube-flannel.yml
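
The flannel pods are created by a DaemonSet; its rollout can be watched directly (an optional check, assuming the DaemonSet name kube-flannel-ds in the kube-system namespace, as seen in the pod list below):

## Wait until the flannel DaemonSet reports all pods as updated and available
[root@master1 ~]# kubectl -n kube-system rollout status ds/kube-flannel-ds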

        2.  Verify: wait until all pods reach the Running state:

[root@master1 ~]# kubectl get pod -A
NAMESPACE     NAME                              READY   STATUS                  RESTARTS        AGE
kube-system   coredns-74586cf9b6-8b88v          0/1     Pending                 0               5h2m
kube-system   coredns-74586cf9b6-rzfsb          0/1     Pending                 0               5h2m
kube-system   etcd-master1                      1/1     Running                 0               5h2m
kube-system   etcd-master2                      1/1     Running                 0               4h28m
kube-system   etcd-master3                      1/1     Running                 0               4h29m
kube-system   kube-apiserver-master1            1/1     Running                 0               5h2m
kube-system   kube-apiserver-master2            1/1     Running                 0               4h29m
kube-system   kube-apiserver-master3            1/1     Running                 0               4h29m
kube-system   kube-controller-manager-master1   1/1     Running                 1 (4h29m ago)   5h2m
kube-system   kube-controller-manager-master2   1/1     Running                 0               4h29m
kube-system   kube-controller-manager-master3   1/1     Running                 0               4h29m
kube-system   kube-flannel-ds-f5594             0/1     Init:ImagePullBackOff   0               4h12m
kube-system   kube-flannel-ds-m5w9n             0/1     Terminating             0               4h48m
kube-system   kube-flannel-ds-nsnnj             0/1     Pending                 0               4h12m
kube-system   kube-flannel-ds-nxxrd             0/1     Terminating             0               4h48m
kube-system   kube-flannel-ds-sfbbl             0/1     Pending                 0               4h12m
kube-system   kube-flannel-ds-spc5p             0/1     Init:ImagePullBackOff   0               4h12m
kube-system   kube-flannel-ds-t8jmx             0/1     Init:ImagePullBackOff   0               4h12m
kube-system   kube-proxy-2jw86                  1/1     Running                 0               5h2m
kube-system   kube-proxy-ljccr                  1/1     Running                 0               4h55m
kube-system   kube-proxy-p5crm                  1/1     Running                 0               4h52m
kube-system   kube-proxy-tr4b7                  1/1     Running                 0               4h54m
kube-system   kube-proxy-xslnt                  1/1     Running                 0               4h54m
kube-system   kube-scheduler-master1            1/1     Running                 1 (4h29m ago)   5h2m
kube-system   kube-scheduler-master2            1/1     Running                 0               4h29m
kube-system   kube-scheduler-master3            1/1     Running                 0               4h29m

        3.  Inspect a specific pod in detail (useful for pods stuck in non-Running states such as ImagePullBackOff):

[root@master1 ~]# kubectl describe pod kube-flannel-ds-m5w9n -n kube-system

        4.  Check the cluster status: all nodes should be Ready:

[root@master1 ~]# kubectl get nodes 
NAME      STATUS   ROLES           AGE     VERSION
master1   Ready    control-plane   5h4m    v1.24.2
master2   Ready    control-plane   4h57m   v1.24.2
master3   Ready    control-plane   4h56m   v1.24.2
node1     Ready    <none>          4h55m   v1.24.2
node2     Ready    <none>          4h54m   v1.24.2
