Preface: Unless otherwise noted, run every command as root on all nodes.
Note: kubeasz is an open-source Kubernetes deployment tool developed in China. It uses Ansible roles to deploy a binary (non-kubeadm) k8s cluster, so anyone familiar with Ansible roles will find it quick and convenient.

1. Machines

deploy   # deployment node
master01
master02
node01

2. Environment

2.1 Deployment node environment

[root@localhost opt]# cat /etc/redhat-release 
CentOS Linux release 7.9.2009 (Core)
[root@localhost opt]# uname -a
Linux localhost.localdomain 3.10.0-229.el7.x86_64 #1 SMP Fri Mar 6 11:36:42 UTC 2015 x86_64 x86_64 x86_64 GNU/Linux
[root@localhost opt]# python -V
Python 3.9.2

2.2 Master and node environment

[root@localhost ~]# cat /etc/redhat-release 
CentOS Linux release 7.5.1804 (Core) 
[root@localhost ~]# uname -a
Linux mater01 3.10.0-862.el7.x86_64 #1 SMP Fri Apr 20 16:44:24 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
[root@localhost ~]# python -V
Python 3.9.2

2.3 IPs and hostnames

deploy 192.168.30.13
master01 192.168.30.10
master02 192.168.30.11
node01 192.168.30.12

3. Configure the yum repositories

Note: the stock CentOS 7 yum repositories are no longer available, so they have to be reconfigured.

3.1 Back up the old repo files

cd /etc/yum.repos.d/
mkdir bak
mv *.repo bak/

3.2 Configure the Base repo

Note: the heredoc delimiter is quoted ('EOF') here and in all the repo files below, so the shell does not expand $releasever, $basearch and $infra while writing the file; yum expands them later.

cat > CentOS-Base.repo << 'EOF'
# CentOS-Base.repo
#
# The mirror system uses the connecting IP address of the client and the
# update status of each mirror to pick mirrors that are updated to and
# geographically close to the client.  You should use this for CentOS updates
# unless you are manually picking other mirrors.
#
# If the mirrorlist= does not work for you, as a fall back you can try the 
# remarked out baseurl= line instead.
#
#

[base]
name=CentOS-$releasever - Base
mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=os&infra=$infra
#baseurl=http://mirror.centos.org/centos/$releasever/os/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7

#released updates 
[updates]
name=CentOS-$releasever - Updates
mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=updates&infra=$infra
#baseurl=http://mirror.centos.org/centos/$releasever/updates/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7

#additional packages that may be useful
[extras]
name=CentOS-$releasever - Extras
mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=extras&infra=$infra
#baseurl=http://mirror.centos.org/centos/$releasever/extras/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7

#additional packages that extend functionality of existing packages
[centosplus]
name=CentOS-$releasever - Plus
mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=centosplus&infra=$infra
#baseurl=http://mirror.centos.org/centos/$releasever/centosplus/$basearch/
gpgcheck=1
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
EOF

3.3 Configure the CR repo

cat > CentOS-CR.repo << 'EOF'
# CentOS-CR.repo
#
# The Continuous Release ( CR )  repository contains rpms that are due in the next
# release for a specific CentOS Version ( eg. next release in CentOS-7 ); these rpms
# are far less tested, with no integration checking or update path testing having
# taken place. They are still built from the upstream sources, but might not map 
# to an exact upstream distro release.
#
# These packages are made available soon after they are built, for people willing 
# to test their environments, provide feedback on content for the next release, and
# for people looking for early-access to next release content.
#
# The CR repo is shipped in a disabled state by default; its important that users 
# understand the implications of turning this on. 
#
# NOTE: We do not use a mirrorlist for the CR repos, to ensure content is available
#       to everyone as soon as possible, and not need to wait for the external
#       mirror network to seed first. However, many local mirrors will carry CR repos
#       and if desired you can use one of these local mirrors by editing the baseurl
#       line in the repo config below.
#

[cr]
name=CentOS-$releasever - cr
baseurl=http://mirror.centos.org/centos/$releasever/cr/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
enabled=0
EOF

3.4 Configure the Debuginfo repo

cat > CentOS-Debuginfo.repo << 'EOF'
# CentOS-Debug.repo
#
# The mirror system uses the connecting IP address of the client and the
# update status of each mirror to pick mirrors that are updated to and
# geographically close to the client.  You should use this for CentOS updates
# unless you are manually picking other mirrors.
#

# All debug packages from all the various CentOS-7 releases
# are merged into a single repo, split by BaseArch
#
# Note: packages in the debuginfo repo are currently not signed
#

[base-debuginfo]
name=CentOS-7 - Debuginfo
baseurl=http://debuginfo.centos.org/7/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-Debug-7
enabled=0
#
EOF

3.5 Configure the fasttrack repo

cat > CentOS-fasttrack.repo << 'EOF'
#CentOS-fasttrack.repo

[fasttrack]
name=CentOS-7 - fasttrack
mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=fasttrack&infra=$infra
#baseurl=http://mirror.centos.org/centos/$releasever/fasttrack/$basearch/
gpgcheck=1
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
EOF

3.6 Configure the Media repo

cat > CentOS-Media.repo << 'EOF'
# CentOS-Media.repo
#
#  This repo can be used with mounted DVD media, verify the mount point for
#  CentOS-7.  You can use this repo and yum to install items directly off the
#  DVD ISO that we release.
#
# To use this repo, put in your DVD and use it with the other repos too:
#  yum --enablerepo=c7-media [command]
#  
# or for ONLY the media repo, do this:
#
#  yum --disablerepo=\* --enablerepo=c7-media [command]

[c7-media]
name=CentOS-$releasever - Media
baseurl=file:///media/CentOS/
        file:///media/cdrom/
        file:///media/cdrecorder/
gpgcheck=1
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
EOF

3.7 Configure the Sources repo

cat > CentOS-Sources.repo << 'EOF'
# CentOS-Sources.repo
#
# The mirror system uses the connecting IP address of the client and the
# update status of each mirror to pick mirrors that are updated to and
# geographically close to the client.  You should use this for CentOS updates
# unless you are manually picking other mirrors.
#
# If the mirrorlist= does not work for you, as a fall back you can try the 
# remarked out baseurl= line instead.
#
#

[base-source]
name=CentOS-$releasever - Base Sources
baseurl=http://vault.centos.org/centos/$releasever/os/Source/
gpgcheck=1
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7

#released updates 
[updates-source]
name=CentOS-$releasever - Updates Sources
baseurl=http://vault.centos.org/centos/$releasever/updates/Source/
gpgcheck=1
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7

#additional packages that may be useful
[extras-source]
name=CentOS-$releasever - Extras Sources
baseurl=http://vault.centos.org/centos/$releasever/extras/Source/
gpgcheck=1
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7

#additional packages that extend functionality of existing packages
[centosplus-source]
name=CentOS-$releasever - Plus Sources
baseurl=http://vault.centos.org/centos/$releasever/centosplus/Source/
gpgcheck=1
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
EOF

3.8 Configure the Vault repo

cat > CentOS-Vault.repo << 'EOF'
# CentOS Vault contains rpms from older releases in the CentOS-7 
# tree.

#c7.0.1406
[C7.0.1406-base]
name=CentOS-7.0.1406 - Base
baseurl=http://vault.centos.org/7.0.1406/os/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
enabled=0

[C7.0.1406-updates]
name=CentOS-7.0.1406 - Updates
baseurl=http://vault.centos.org/7.0.1406/updates/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
enabled=0

[C7.0.1406-extras]
name=CentOS-7.0.1406 - Extras
baseurl=http://vault.centos.org/7.0.1406/extras/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
enabled=0

[C7.0.1406-centosplus]
name=CentOS-7.0.1406 - CentOSPlus
baseurl=http://vault.centos.org/7.0.1406/centosplus/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
enabled=0

[C7.0.1406-fasttrack]
name=CentOS-7.0.1406 - CentOSPlus
baseurl=http://vault.centos.org/7.0.1406/fasttrack/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
enabled=0

# C7.1.1503
[C7.1.1503-base]
name=CentOS-7.1.1503 - Base
baseurl=http://vault.centos.org/7.1.1503/os/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
enabled=0

[C7.1.1503-updates]
name=CentOS-7.1.1503 - Updates
baseurl=http://vault.centos.org/7.1.1503/updates/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
enabled=0

[C7.1.1503-extras]
name=CentOS-7.1.1503 - Extras
baseurl=http://vault.centos.org/7.1.1503/extras/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
enabled=0

[C7.1.1503-centosplus]
name=CentOS-7.1.1503 - CentOSPlus
baseurl=http://vault.centos.org/7.1.1503/centosplus/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
enabled=0

[C7.1.1503-fasttrack]
name=CentOS-7.1.1503 - CentOSPlus
baseurl=http://vault.centos.org/7.1.1503/fasttrack/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
enabled=0

# C7.2.1511
[C7.2.1511-base]
name=CentOS-7.2.1511 - Base
baseurl=http://vault.centos.org/7.2.1511/os/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
enabled=0

[C7.2.1511-updates]
name=CentOS-7.2.1511 - Updates
baseurl=http://vault.centos.org/7.2.1511/updates/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
enabled=0

[C7.2.1511-extras]
name=CentOS-7.2.1511 - Extras
baseurl=http://vault.centos.org/7.2.1511/extras/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
enabled=0

[C7.2.1511-centosplus]
name=CentOS-7.2.1511 - CentOSPlus
baseurl=http://vault.centos.org/7.2.1511/centosplus/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
enabled=0

[C7.2.1511-fasttrack]
name=CentOS-7.2.1511 - CentOSPlus
baseurl=http://vault.centos.org/7.2.1511/fasttrack/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
enabled=0

# C7.3.1611
[C7.3.1611-base]
name=CentOS-7.3.1611 - Base
baseurl=http://vault.centos.org/7.3.1611/os/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
enabled=0

[C7.3.1611-updates]
name=CentOS-7.3.1611 - Updates
baseurl=http://vault.centos.org/7.3.1611/updates/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
enabled=0

[C7.3.1611-extras]
name=CentOS-7.3.1611 - Extras
baseurl=http://vault.centos.org/7.3.1611/extras/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
enabled=0

[C7.3.1611-centosplus]
name=CentOS-7.3.1611 - CentOSPlus
baseurl=http://vault.centos.org/7.3.1611/centosplus/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
enabled=0

[C7.3.1611-fasttrack]
name=CentOS-7.3.1611 - CentOSPlus
baseurl=http://vault.centos.org/7.3.1611/fasttrack/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
enabled=0

# C7.4.1708
[C7.4.1708-base]
name=CentOS-7.4.1708 - Base
baseurl=http://vault.centos.org/7.4.1708/os/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
enabled=0

[C7.4.1708-updates]
name=CentOS-7.4.1708 - Updates
baseurl=http://vault.centos.org/7.4.1708/updates/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
enabled=0

[C7.4.1708-extras]
name=CentOS-7.4.1708 - Extras
baseurl=http://vault.centos.org/7.4.1708/extras/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
enabled=0

[C7.4.1708-centosplus]
name=CentOS-7.4.1708 - CentOSPlus
baseurl=http://vault.centos.org/7.4.1708/centosplus/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
enabled=0

[C7.4.1708-fasttrack]
name=CentOS-7.4.1708 - CentOSPlus
baseurl=http://vault.centos.org/7.4.1708/fasttrack/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
enabled=0
EOF

3.9 Configure the x86_64-kernel repo

cat > CentOS-x86_64-kernel.repo << 'EOF'
[centos-kernel]
name=CentOS LTS Kernels for $basearch
mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=kernel&infra=$infra
#baseurl=http://mirror.centos.org/altarch/7/kernel/$basearch/
enabled=0
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7

[centos-kernel-experimental]
name=CentOS Experimental Kernels for $basearch
mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=experimental&infra=$infra
#baseurl=http://mirror.centos.org/altarch/7/experimental/$basearch/
enabled=0
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
EOF

3.10 Configure the docker-ce repo

cat > docker-ce.repo << 'EOF'
[docker-ce-stable]
name=Docker CE Stable - $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/$basearch/stable
enabled=1
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-stable-debuginfo]
name=Docker CE Stable - Debuginfo $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/debug-$basearch/stable
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-stable-source]
name=Docker CE Stable - Sources
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/source/stable
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-test]
name=Docker CE Test - $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/$basearch/test
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-test-debuginfo]
name=Docker CE Test - Debuginfo $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/debug-$basearch/test
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-test-source]
name=Docker CE Test - Sources
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/source/test
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-nightly]
name=Docker CE Nightly - $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/$basearch/nightly
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-nightly-debuginfo]
name=Docker CE Nightly - Debuginfo $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/debug-$basearch/nightly
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-nightly-source]
name=Docker CE Nightly - Sources
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/source/nightly
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
EOF

3.11 Configure the nginx repo

cat > nginx.repo << 'EOF'
[nginx-stable]
name=nginx stable repo
baseurl=http://nginx.org/packages/centos/$releasever/$basearch/
gpgcheck=1
enabled=1
gpgkey=https://nginx.org/keys/nginx_signing.key
module_hotfixes=true

[nginx-mainline]
name=nginx mainline repo
baseurl=http://nginx.org/packages/mainline/centos/$releasever/$basearch/
gpgcheck=1
enabled=0
gpgkey=https://nginx.org/keys/nginx_signing.key
module_hotfixes=true
EOF
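
After all the repo files are written, it is worth rebuilding the yum cache to confirm the repositories actually resolve (a quick sanity check; the repo list you see depends on which repos are enabled):

yum clean all
yum makecache
yum repolist enabled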

4. Disable selinux and the firewall

4.1 Disable the firewall

systemctl disable firewalld
systemctl stop firewalld
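
You can confirm the firewall is stopped and will stay off after a reboot (expected output: inactive / disabled):

systemctl is-active firewalld
systemctl is-enabled firewalld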

4.2 Disable selinux

setenforce 0
sed -i 's#SELINUX=.*#SELINUX=disabled#g' /etc/selinux/config

Warning: you must reboot the machine after disabling selinux; otherwise the deployment playbooks still detect selinux as enabled and abort with an error.

Without a reboot after disabling selinux, the error looks like this:

TASK [chrony : 下载二进制文件chronyd] **************************************************************************************************************************************************************
fatal: [192.168.30.16]: FAILED! => {"changed": false, "checksum": "99abd6b74a4dbda11f2a65d77903d925bacdd349", "msg": "Aborting, target uses selinux but python bindings (libselinux-python) aren't installed!"}
fatal: [192.168.30.18]: FAILED! => {"changed": false, "checksum": "99abd6b74a4dbda11f2a65d77903d925bacdd349", "msg": "Aborting, target uses selinux but python bindings (libselinux-python) aren't installed!"}
fatal: [192.168.30.17]: FAILED! => {"changed": false, "checksum": "99abd6b74a4dbda11f2a65d77903d925bacdd349", "msg": "Aborting, target uses selinux but python bindings (libselinux-python) aren't installed!"}
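
After the reboot, verify that selinux is really off before running the playbooks (getenforce should print Disabled):

getenforce
sestatus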

5. Set the hostnames

Note: run the corresponding command on master01, master02 and node01 respectively.

5.1 On master01

hostnamectl set-hostname master01

5.2 On master02

hostnamectl set-hostname master02

5.3 On node01

hostnamectl set-hostname node01
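
Optionally confirm the change on each node (hostnamectl shows the new static hostname immediately; log in again to refresh the shell prompt):

hostnamectl | grep 'Static hostname'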

6. Deployment steps

Note: run the following on the deploy node only.

6.1 Set up passwordless SSH login

Configure the deploy node so it can SSH to every node without a password (the python symlink is handled in step 6.2).

Here $IP is each node's address, including the deploy node itself; answer yes and enter the root password when prompted.

ssh-copy-id $IP 
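
ssh-copy-id needs an existing key pair on the deploy node, so generate one first if it is missing. A minimal sketch, using the node IPs from the hosts file later in this article (adjust the list to your own plan):

ssh-keygen -t rsa -b 4096 -N '' -f ~/.ssh/id_rsa     # skip if ~/.ssh/id_rsa already exists
for IP in 192.168.30.16 192.168.30.17 192.168.30.18; do
  ssh-copy-id root@${IP}
done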

6.2 Create the python symlink on every node

ssh $IP ln -s /usr/bin/python3 /usr/bin/python
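
The same loop idea works here; ln -sf keeps the command safe to re-run if the link already exists (a sketch, same IP list assumption as above):

for IP in 192.168.30.16 192.168.30.17 192.168.30.18; do
  ssh root@${IP} "ln -sf /usr/bin/python3 /usr/bin/python"
done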

6.3 Orchestrate the k8s installation on the deploy node

6.3.1 Download the project source, binaries and offline images

Download the ezdown helper script; this example uses kubeasz 3.5.0.

export release=3.5.0
wget https://github.com/easzlab/kubeasz/releases/download/${release}/ezdown
chmod +x ./ezdown

Download the kubeasz code, binaries and default container images (run ./ezdown to see all available options).

# for networks inside mainland China (uses domestic mirrors)

./ezdown -D

#海外环境

./ezdown -D -m standard

[Optional] Download extra container images (cilium, flannel, prometheus, etc.)

./ezdown -X

[Optional] Download offline system packages (for hosts that cannot use a yum/apt repository)

./ezdown -P

Once the script finishes successfully, all files (kubeasz code, binaries, offline images) are laid out under /etc/kubeasz.
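
A quick way to confirm the download step finished (exact contents depend on the ezdown version and the options used):

ls /etc/kubeasz
docker images | head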

6.3.2 Create a cluster configuration instance

# run kubeasz itself as a container

./ezdown -S

# create a new cluster named k8s-01

docker exec -it kubeasz ezctl new k8s-01
2021-01-19 10:48:23 DEBUG generate custom cluster files in /etc/kubeasz/clusters/k8s-01
2021-01-19 10:48:23 DEBUG set version of common plugins
2021-01-19 10:48:23 DEBUG cluster k8s-01: files successfully created.
2021-01-19 10:48:23 INFO next steps 1: to config '/etc/kubeasz/clusters/k8s-01/hosts'
2021-01-19 10:48:23 INFO next steps 2: to config '/etc/kubeasz/clusters/k8s-01/config.yml'

Then, as the prompt says, edit '/etc/kubeasz/clusters/k8s-01/hosts' and '/etc/kubeasz/clusters/k8s-01/config.yml': adjust the hosts file and the main cluster-level options according to the node plan above; the remaining component-level options can be changed in config.yml. The edited files look like this:

cat /etc/kubeasz/clusters/k8s-01/hosts 
# 'etcd' cluster should have odd member(s) (1,3,5,...)
[etcd]
192.168.30.16
192.168.30.17
192.168.30.18

# master node(s)
[kube_master]
192.168.30.16
192.168.30.17

# work node(s)
[kube_node]
192.168.30.18

# [optional] harbor server, a private docker registry
# 'NEW_INSTALL': 'true' to install a harbor server; 'false' to integrate with existed one
[harbor]
#192.168.1.8 NEW_INSTALL=false
192.168.30.16 NEW_INSTALL=true

# [optional] loadbalance for accessing k8s from outside
[ex_lb]
#192.168.1.6 LB_ROLE=backup EX_APISERVER_VIP=192.168.30.1750 EX_APISERVER_PORT=8443
#192.168.1.7 LB_ROLE=master EX_APISERVER_VIP=192.168.30.1750 EX_APISERVER_PORT=8443
192.168.30.16 LB_ROLE=backup EX_APISERVER_VIP=192.168.30.200 EX_APISERVER_PORT=8443
192.168.30.17 LB_ROLE=master EX_APISERVER_VIP=192.168.30.200 EX_APISERVER_PORT=8443

# [optional] ntp server for the cluster
[chrony]
#192.168.30.16
192.168.30.16

[all:vars]
# --------- Main Variables ---------------
# Secure port for apiservers
SECURE_PORT="6443"

# Cluster container-runtime supported: docker, containerd
# if k8s version >= 1.24, docker is not supported
CONTAINER_RUNTIME="containerd"

# Network plugins supported: calico, flannel, kube-router, cilium, kube-ovn
CLUSTER_NETWORK="calico"

# Service proxy mode of kube-proxy: 'iptables' or 'ipvs'
PROXY_MODE="ipvs"

# K8S Service CIDR, not overlap with node(host) networking
SERVICE_CIDR="10.68.0.0/16"

# Cluster CIDR (Pod CIDR), not overlap with node(host) networking
CLUSTER_CIDR="172.20.0.0/16"

# NodePort Range
NODE_PORT_RANGE="30000-32767"

# Cluster DNS Domain
CLUSTER_DNS_DOMAIN="cluster.local"

# -------- Additional Variables (don't change the default value right now) ---
# Binaries Directory
bin_dir="/opt/kube/bin"

# Deploy Directory (kubeasz workspace)
base_dir="/etc/kubeasz"

# Directory for a specific cluster
cluster_dir="{{ base_dir }}/clusters/k8s-01"

# CA and other components cert/key Directory
ca_dir="/etc/kubernetes/ssl"

cat /etc/kubeasz/clusters/k8s-01/config.yml 
############################
# prepare
############################
# 可选离线安装系统软件包 (offline|online)
INSTALL_SOURCE: "online"

# 可选进行系统安全加固 github.com/dev-sec/ansible-collection-hardening
OS_HARDEN: false


############################
# role:deploy
############################
# default: ca will expire in 100 years
# default: certs issued by the ca will expire in 50 years
CA_EXPIRY: "876000h"
CERT_EXPIRY: "438000h"

# force to recreate CA and other certs, not suggested to set 'true'
CHANGE_CA: false

# kubeconfig 配置参数
CLUSTER_NAME: "cluster1"
CONTEXT_NAME: "context-{{ CLUSTER_NAME }}"

# k8s version
K8S_VER: "1.26.0"

############################
# role:etcd
############################
# 设置不同的wal目录,可以避免磁盘io竞争,提高性能
ETCD_DATA_DIR: "/var/lib/etcd"
ETCD_WAL_DIR: ""


############################
# role:runtime [containerd,docker]
############################
# ------------------------------------------- containerd
# [.]启用容器仓库镜像
ENABLE_MIRROR_REGISTRY: true

# [containerd]基础容器镜像
SANDBOX_IMAGE: "easzlab.io.local:5000/easzlab/pause:3.9"

# [containerd]容器持久化存储目录
CONTAINERD_STORAGE_DIR: "/var/lib/containerd"

# ------------------------------------------- docker
# [docker]容器存储目录
DOCKER_STORAGE_DIR: "/var/lib/docker"

# [docker]开启Restful API
ENABLE_REMOTE_API: false

# [docker]信任的HTTP仓库
INSECURE_REG: '["http://easzlab.io.local:5000"]'


############################
# role:kube-master
############################
# k8s 集群 master 节点证书配置,可以添加多个ip和域名(比如增加公网ip和域名)
MASTER_CERT_HOSTS:
  - "192.168.30.16"
  - "192.168.30.17"
  - "192.168.30.18"
  - "k8s.easzlab.io"
  - "easzlab.io.local"
  - "harbor.easzlab.io.local"
  #- "www.test.com"

# node 节点上 pod 网段掩码长度(决定每个节点最多能分配的pod ip地址)
# 如果flannel 使用 --kube-subnet-mgr 参数,那么它将读取该设置为每个节点分配pod网段
# https://github.com/coreos/flannel/issues/847
NODE_CIDR_LEN: 24


############################
# role:kube-node
############################
# Kubelet 根目录
KUBELET_ROOT_DIR: "/var/lib/kubelet"

# node节点最大pod 数
MAX_PODS: 110

# 配置为kube组件(kubelet,kube-proxy,dockerd等)预留的资源量
# 数值设置详见templates/kubelet-config.yaml.j2
KUBE_RESERVED_ENABLED: "no"

# k8s 官方不建议草率开启 system-reserved, 除非你基于长期监控,了解系统的资源占用状况;
# 并且随着系统运行时间,需要适当增加资源预留,数值设置详见templates/kubelet-config.yaml.j2
# 系统预留设置基于 4c/8g 虚机,最小化安装系统服务,如果使用高性能物理机可以适当增加预留
# 另外,集群安装时候apiserver等资源占用会短时较大,建议至少预留1g内存
SYS_RESERVED_ENABLED: "no"


############################
# role:network [flannel,calico,cilium,kube-ovn,kube-router]
############################
# ------------------------------------------- flannel
# [flannel]设置flannel 后端"host-gw","vxlan"等
FLANNEL_BACKEND: "vxlan"
DIRECT_ROUTING: false

# [flannel] 
flannel_ver: "v0.19.2"

# ------------------------------------------- calico
# [calico] IPIP隧道模式可选项有: [Always, CrossSubnet, Never],跨子网可以配置为Always与CrossSubnet(公有云建议使用always比较省事,其他的话需要修改各自公有云的网络配置,具体可以参考各个公有云说明)
# 其次CrossSubnet为隧道+BGP路由混合模式可以提升网络性能,同子网配置为Never即可.
CALICO_IPV4POOL_IPIP: "Always"

# [calico]设置 calico-node使用的host IP,bgp邻居通过该地址建立,可手工指定也可以自动发现
IP_AUTODETECTION_METHOD: "can-reach={{ groups['kube_master'][0] }}"

# [calico]设置calico 网络 backend: brid, vxlan, none
CALICO_NETWORKING_BACKEND: "brid"

# [calico]设置calico 是否使用route reflectors
# 如果集群规模超过50个节点,建议启用该特性
CALICO_RR_ENABLED: false

# CALICO_RR_NODES 配置route reflectors的节点,如果未设置默认使用集群master节点 
# CALICO_RR_NODES: ["192.168.1.1", "192.168.1.2"]
CALICO_RR_NODES: []

# [calico]更新支持calico 版本: ["3.19", "3.23"]
calico_ver: "v3.23.5"

# [calico]calico 主版本
calico_ver_main: "{{ calico_ver.split('.')[0] }}.{{ calico_ver.split('.')[1] }}"

# ------------------------------------------- cilium
# [cilium]镜像版本
cilium_ver: "1.12.4"
cilium_connectivity_check: true
cilium_hubble_enabled: false
cilium_hubble_ui_enabled: false

# ------------------------------------------- kube-ovn
# [kube-ovn]选择 OVN DB and OVN Control Plane 节点,默认为第一个master节点
OVN_DB_NODE: "{{ groups['kube_master'][0] }}"

# [kube-ovn]离线镜像tar包
kube_ovn_ver: "v1.5.3"

# ------------------------------------------- kube-router
# [kube-router]公有云上存在限制,一般需要始终开启 ipinip;自有环境可以设置为 "subnet"
OVERLAY_TYPE: "full"

# [kube-router]NetworkPolicy 支持开关
FIREWALL_ENABLE: true

# [kube-router]kube-router 镜像版本
kube_router_ver: "v0.3.1"
busybox_ver: "1.28.4"


############################
# role:cluster-addon
############################
# coredns 自动安装
dns_install: "yes"
corednsVer: "1.9.3"
ENABLE_LOCAL_DNS_CACHE: true
dnsNodeCacheVer: "1.22.13"
# 设置 local dns cache 地址
LOCAL_DNS_CACHE: "169.254.20.10"

# metric server 自动安装
metricsserver_install: "yes"
metricsVer: "v0.5.2"

# dashboard 自动安装
dashboard_install: "yes"
dashboardVer: "v2.7.0"
dashboardMetricsScraperVer: "v1.0.8"

# prometheus 自动安装
prom_install: "no"
prom_namespace: "monitor"
prom_chart_ver: "39.11.0"

# nfs-provisioner 自动安装
nfs_provisioner_install: "no"
nfs_provisioner_namespace: "kube-system"
nfs_provisioner_ver: "v4.0.2"
nfs_storage_class: "managed-nfs-storage"
nfs_server: "192.168.30.16"
nfs_path: "/data/nfs"

# network-check 自动安装
network_check_enabled: false 
network_check_schedule: "*/5 * * * *"

############################
# role:harbor
############################
# harbor version,完整版本号
HARBOR_VER: "v2.1.5"
HARBOR_DOMAIN: "harbor.easzlab.io.local"
HARBOR_PATH: /var/data
HARBOR_TLS_PORT: 8443
HARBOR_REGISTRY: "{{ HARBOR_DOMAIN }}:{{ HARBOR_TLS_PORT }}"

# if set 'false', you need to put certs named harbor.pem and harbor-key.pem in directory 'down'
HARBOR_SELF_SIGNED_CERT: true

# install extra component
HARBOR_WITH_NOTARY: false
HARBOR_WITH_TRIVY: false
HARBOR_WITH_CLAIR: false
HARBOR_WITH_CHARTMUSEUM: true

6.3.3 Start the installation. If you are unfamiliar with the installation flow, read the "installation steps" walkthrough on the project home page first, install step by step, and verify each step.

# an alias is recommended; check that ~/.bashrc contains: alias dk='docker exec -it kubeasz'

source ~/.bashrc

# one-shot install, equivalent to: docker exec -it kubeasz ezctl setup k8s-01 all

dk ezctl setup k8s-01 all

# or install step by step; run dk ezctl help setup for details on the individual steps

dk ezctl setup k8s-01 01
dk ezctl setup k8s-01 02
dk ezctl setup k8s-01 03
......
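
For reference, in kubeasz 3.x the numbered steps map roughly as follows; always trust the output of dk ezctl help setup for your exact version:

dk ezctl setup k8s-01 01    # prepare: system settings on all nodes
dk ezctl setup k8s-01 02    # etcd cluster
dk ezctl setup k8s-01 03    # container runtime (containerd here)
dk ezctl setup k8s-01 04    # kube-master components
dk ezctl setup k8s-01 05    # kube-node components
dk ezctl setup k8s-01 06    # network plugin (calico here)
dk ezctl setup k8s-01 07    # cluster add-ons (coredns, dashboard, metrics-server, ...)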

7. Pitfalls

7.1 Operating system pitfall

Do not use CentOS 7.9 for the master and node machines; its ssh stack has a bug that breaks this setup: even though the deploy host itself has passwordless SSH to the other machines, the kubeasz container does not, and kubeasz relies on the Ansible running inside that container having passwordless SSH access to every node.

7.2 Certificate expiry problem

If the deploy node is prepared but the actual deployment is postponed for a long time (exactly how long is unclear; deploying again three days later was already broken for me), you run into a certificate problem. Taking etcd startup as an example, the errors below come from the etcd logs on the individual machines, not from the output kubeasz prints to the screen during deployment:

Feb 26 21:26:04 localhost etcd[5587]: {"level":"info","ts":"2023-02-26T21:26:04.720-0500","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"6a0099bf6b9e4072 is starting a new election at term 1"}
Feb 26 21:26:04 localhost etcd[5587]: {"level":"info","ts":"2023-02-26T21:26:04.720-0500","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"6a0099bf6b9e4072 became pre-candidate at term 1"}
Feb 26 21:26:04 localhost etcd[5587]: {"level":"info","ts":"2023-02-26T21:26:04.720-0500","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"6a0099bf6b9e4072 received MsgPreVoteResp from 6a0099bf6b9e4072 at term 1"}
Feb 26 21:26:04 localhost etcd[5587]: {"level":"info","ts":"2023-02-26T21:26:04.720-0500","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"6a0099bf6b9e4072 [logterm: 1, index: 3] sent MsgPreVote request to c4c855f544e30cf3 at term 1"}
Feb 26 21:26:04 localhost etcd[5587]: {"level":"info","ts":"2023-02-26T21:26:04.720-0500","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"6a0099bf6b9e4072 [logterm: 1, index: 3] sent MsgPreVote request to d8380c04b14bbb3a at term 1"}
Feb 26 21:26:07 localhost etcd[5587]: {"level":"warn","ts":"2023-02-26T21:26:07.158-0500","caller":"embed/config_logging.go:169","msg":"rejected connection","remote-addr":"192.168.30.18:54334","server-name":"","error":"remote error: tls: bad certificate"}
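
To confirm it really is an expired or invalid certificate, check the validity window of the generated certs on the affected node (assuming they live under /etc/kubernetes/ssl as set by ca_dir above; file names may differ in your layout):

openssl x509 -in /etc/kubernetes/ssl/etcd.pem -noout -dates
openssl x509 -in /etc/kubernetes/ssl/ca.pem -noout -dates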

8. Post-deployment steps

8.1 Configure kubectl

Pick any one master node.
1. Copy kubectl into a directory on $PATH

cp /opt/kube/bin/kubectl /usr/bin/

2. Configure the kubeconfig
Copy the kubeconfig file from the deploy node to that master node.
Note: run the following scp command on the deploy node.

scp ~/.kube/config 192.168.30.16:/opt/

Temporary (current shell only):

export KUBECONFIG=/opt/config

Permanent:

echo "export KUBECONFIG=/opt/config" >> /etc/profile
source /etc/profile
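
A quick check that the kubeconfig is being picked up (context and cluster names follow CLUSTER_NAME/CONTEXT_NAME from config.yml):

kubectl config current-context
kubectl cluster-info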

9. Verification

Note: unless stated otherwise, run all verification commands on the node where kubectl has been configured.

9.1 Verify cluster status

kubectl get nodes
NAME            STATUS                     ROLES    AGE   VERSION
192.168.30.16   Ready,SchedulingDisabled   master   17m   v1.26.0
192.168.30.17   Ready,SchedulingDisabled   master   17m   v1.26.0
192.168.30.18   Ready                      node     16m   v1.26.0

9.2 Check the network interfaces

[root@master01 ~]# ifconfig 
ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.30.16  netmask 255.255.255.0  broadcast 192.168.30.255
        inet6 fe80::8a54:dfaa:4fc6:44f5  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:1b:eb:84  txqueuelen 1000  (Ethernet)
        RX packets 1154648  bytes 671138691 (640.0 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 863739  bytes 243853107 (232.5 MiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 1000  (Local Loopback)
        RX packets 63677  bytes 11744370 (11.2 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 63677  bytes 11744370 (11.2 MiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

tunl0: flags=193<UP,RUNNING,NOARP>  mtu 1480
        inet 172.20.241.64  netmask 255.255.255.255
        tunnel   txqueuelen 1000  (IPIP Tunnel)
        RX packets 1149  bytes 332469 (324.6 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 1341  bytes 120955 (118.1 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
[root@master01 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:1b:eb:84 brd ff:ff:ff:ff:ff:ff
    inet 192.168.30.16/24 brd 192.168.30.255 scope global noprefixroute ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::8a54:dfaa:4fc6:44f5/64 scope link noprefixroute 
       valid_lft forever preferred_lft forever
3: dummy0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN group default qlen 1000
    link/ether 5e:97:49:93:04:92 brd ff:ff:ff:ff:ff:ff
4: kube-ipvs0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN group default 
    link/ether 56:d6:46:64:58:b8 brd ff:ff:ff:ff:ff:ff
    inet 10.68.0.1/32 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
    inet 10.68.0.2/32 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
    inet 10.68.114.28/32 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
    inet 10.68.100.48/32 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
    inet 10.68.250.76/32 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
    inet 10.68.185.194/32 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
5: tunl0@NONE: <NOARP,UP,LOWER_UP> mtu 1480 qdisc noqueue state UNKNOWN group default qlen 1000
    link/ipip 0.0.0.0 brd 0.0.0.0
    inet 172.20.241.64/32 scope global tunl0
       valid_lft forever preferred_lft forever
6: nodelocaldns: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN group default 
    link/ether ce:6a:4c:6e:08:f0 brd ff:ff:ff:ff:ff:ff
    inet 169.254.20.10/32 scope global nodelocaldns
       valid_lft forever preferred_lft forever

9.3 Check service IPs

[root@master01 ~]# kubectl get svc -A
NAMESPACE     NAME                        TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                  AGE
default       kubernetes                  ClusterIP   10.68.0.1       <none>        443/TCP                  20m
kube-system   dashboard-metrics-scraper   ClusterIP   10.68.185.194   <none>        8000/TCP                 17m
kube-system   kube-dns                    ClusterIP   10.68.0.2       <none>        53/UDP,53/TCP,9153/TCP   17m
kube-system   kube-dns-upstream           ClusterIP   10.68.114.28    <none>        53/UDP,53/TCP            17m
kube-system   kubernetes-dashboard        NodePort    10.68.250.76    <none>        443:32510/TCP            17m
kube-system   metrics-server              ClusterIP   10.68.100.48    <none>        443/TCP                  17m
kube-system   node-local-dns              ClusterIP   None            <none>        9253/TCP                 17m

9.4 Check pod IPs

[root@master01 ~]# kubectl get pods -A -o wide
NAMESPACE     NAME                                         READY   STATUS    RESTARTS   AGE   IP              NODE            NOMINATED NODE   READINESS GATES
kube-system   calico-kube-controllers-89b744d6c-2c8fv      1/1     Running   0          20m   192.168.30.18   192.168.30.18   <none>           <none>
kube-system   calico-node-5vbfg                            1/1     Running   0          20m   192.168.30.16   192.168.30.16   <none>           <none>
kube-system   calico-node-mskx7                            1/1     Running   0          20m   192.168.30.17   192.168.30.17   <none>           <none>
kube-system   calico-node-rpjw7                            1/1     Running   0          20m   192.168.30.18   192.168.30.18   <none>           <none>
kube-system   coredns-6665999d97-fmqhs                     1/1     Running   0          19m   172.20.235.1    192.168.30.18   <none>           <none>
kube-system   dashboard-metrics-scraper-57566685b4-fxtrf   1/1     Running   0          19m   172.20.235.4    192.168.30.18   <none>           <none>
kube-system   kubernetes-dashboard-57db9bfd5b-sn6lg        1/1     Running   0          19m   172.20.235.3    192.168.30.18   <none>           <none>
kube-system   metrics-server-6bd9f986fc-9d5gv              1/1     Running   0          19m   172.20.235.2    192.168.30.18   <none>           <none>
kube-system   node-local-dns-p62vc                         1/1     Running   0          19m   192.168.30.16   192.168.30.16   <none>           <none>
kube-system   node-local-dns-v87bq                         1/1     Running   0          19m   192.168.30.17   192.168.30.17   <none>           <none>
kube-system   node-local-dns-xjbpj                         1/1     Running   0          19m   192.168.30.18   192.168.30.18   <none>           <none>

9.5 List all resources

Brief output:

[root@master01 ~]# kubectl get all -A
NAMESPACE     NAME                                             READY   STATUS    RESTARTS   AGE
kube-system   pod/calico-kube-controllers-89b744d6c-2c8fv      1/1     Running   0          30m
kube-system   pod/calico-node-5vbfg                            1/1     Running   0          30m
kube-system   pod/calico-node-mskx7                            1/1     Running   0          30m
kube-system   pod/calico-node-rpjw7                            1/1     Running   0          30m
kube-system   pod/coredns-6665999d97-fmqhs                     1/1     Running   0          29m
kube-system   pod/dashboard-metrics-scraper-57566685b4-fxtrf   1/1     Running   0          29m
kube-system   pod/kubernetes-dashboard-57db9bfd5b-sn6lg        1/1     Running   0          29m
kube-system   pod/metrics-server-6bd9f986fc-9d5gv              1/1     Running   0          29m
kube-system   pod/node-local-dns-p62vc                         1/1     Running   0          29m
kube-system   pod/node-local-dns-v87bq                         1/1     Running   0          29m
kube-system   pod/node-local-dns-xjbpj                         1/1     Running   0          29m

NAMESPACE     NAME                                TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                  AGE
default       service/kubernetes                  ClusterIP   10.68.0.1       <none>        443/TCP                  32m
kube-system   service/dashboard-metrics-scraper   ClusterIP   10.68.185.194   <none>        8000/TCP                 29m
kube-system   service/kube-dns                    ClusterIP   10.68.0.2       <none>        53/UDP,53/TCP,9153/TCP   29m
kube-system   service/kube-dns-upstream           ClusterIP   10.68.114.28    <none>        53/UDP,53/TCP            29m
kube-system   service/kubernetes-dashboard        NodePort    10.68.250.76    <none>        443:32510/TCP            29m
kube-system   service/metrics-server              ClusterIP   10.68.100.48    <none>        443/TCP                  29m
kube-system   service/node-local-dns              ClusterIP   None            <none>        9253/TCP                 29m

NAMESPACE     NAME                            DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR            AGE
kube-system   daemonset.apps/calico-node      3         3         3       3            3           kubernetes.io/os=linux   30m
kube-system   daemonset.apps/node-local-dns   3         3         3       3            3           <none>                   29m

NAMESPACE     NAME                                        READY   UP-TO-DATE   AVAILABLE   AGE
kube-system   deployment.apps/calico-kube-controllers     1/1     1            1           30m
kube-system   deployment.apps/coredns                     1/1     1            1           29m
kube-system   deployment.apps/dashboard-metrics-scraper   1/1     1            1           29m
kube-system   deployment.apps/kubernetes-dashboard        1/1     1            1           29m
kube-system   deployment.apps/metrics-server              1/1     1            1           29m

NAMESPACE     NAME                                                   DESIRED   CURRENT   READY   AGE
kube-system   replicaset.apps/calico-kube-controllers-89b744d6c      1         1         1       30m
kube-system   replicaset.apps/coredns-6665999d97                     1         1         1       29m
kube-system   replicaset.apps/dashboard-metrics-scraper-57566685b4   1         1         1       29m
kube-system   replicaset.apps/kubernetes-dashboard-57db9bfd5b        1         1         1       29m
kube-system   replicaset.apps/metrics-server-6bd9f986fc              1         1         1       29m

Detailed output (-o wide):

[root@master01 ~]# kubectl get all -A -o wide
NAMESPACE     NAME                                             READY   STATUS    RESTARTS   AGE   IP              NODE            NOMINATED NODE   READINESS GATES
kube-system   pod/calico-kube-controllers-89b744d6c-2c8fv      1/1     Running   0          31m   192.168.30.18   192.168.30.18   <none>           <none>
kube-system   pod/calico-node-5vbfg                            1/1     Running   0          31m   192.168.30.16   192.168.30.16   <none>           <none>
kube-system   pod/calico-node-mskx7                            1/1     Running   0          31m   192.168.30.17   192.168.30.17   <none>           <none>
kube-system   pod/calico-node-rpjw7                            1/1     Running   0          31m   192.168.30.18   192.168.30.18   <none>           <none>
kube-system   pod/coredns-6665999d97-fmqhs                     1/1     Running   0          30m   172.20.235.1    192.168.30.18   <none>           <none>
kube-system   pod/dashboard-metrics-scraper-57566685b4-fxtrf   1/1     Running   0          30m   172.20.235.4    192.168.30.18   <none>           <none>
kube-system   pod/kubernetes-dashboard-57db9bfd5b-sn6lg        1/1     Running   0          30m   172.20.235.3    192.168.30.18   <none>           <none>
kube-system   pod/metrics-server-6bd9f986fc-9d5gv              1/1     Running   0          30m   172.20.235.2    192.168.30.18   <none>           <none>
kube-system   pod/node-local-dns-p62vc                         1/1     Running   0          30m   192.168.30.16   192.168.30.16   <none>           <none>
kube-system   pod/node-local-dns-v87bq                         1/1     Running   0          30m   192.168.30.17   192.168.30.17   <none>           <none>
kube-system   pod/node-local-dns-xjbpj                         1/1     Running   0          30m   192.168.30.18   192.168.30.18   <none>           <none>

NAMESPACE     NAME                                TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                  AGE   SELECTOR
default       service/kubernetes                  ClusterIP   10.68.0.1       <none>        443/TCP                  33m   <none>
kube-system   service/dashboard-metrics-scraper   ClusterIP   10.68.185.194   <none>        8000/TCP                 30m   k8s-app=dashboard-metrics-scraper
kube-system   service/kube-dns                    ClusterIP   10.68.0.2       <none>        53/UDP,53/TCP,9153/TCP   30m   k8s-app=kube-dns
kube-system   service/kube-dns-upstream           ClusterIP   10.68.114.28    <none>        53/UDP,53/TCP            30m   k8s-app=kube-dns
kube-system   service/kubernetes-dashboard        NodePort    10.68.250.76    <none>        443:32510/TCP            30m   k8s-app=kubernetes-dashboard
kube-system   service/metrics-server              ClusterIP   10.68.100.48    <none>        443/TCP                  30m   k8s-app=metrics-server
kube-system   service/node-local-dns              ClusterIP   None            <none>        9253/TCP                 30m   k8s-app=node-local-dns

NAMESPACE     NAME                            DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR            AGE   CONTAINERS    IMAGES                                                     SELECTOR
kube-system   daemonset.apps/calico-node      3         3         3       3            3           kubernetes.io/os=linux   31m   calico-node   easzlab.io.local:5000/calico/node:v3.23.5                  k8s-app=calico-node
kube-system   daemonset.apps/node-local-dns   3         3         3       3            3           <none>                   30m   node-cache    easzlab.io.local:5000/easzlab/k8s-dns-node-cache:1.22.13   k8s-app=node-local-dns

NAMESPACE     NAME                                        READY   UP-TO-DATE   AVAILABLE   AGE   CONTAINERS                  IMAGES                                                      SELECTOR
kube-system   deployment.apps/calico-kube-controllers     1/1     1            1           31m   calico-kube-controllers     easzlab.io.local:5000/calico/kube-controllers:v3.23.5       k8s-app=calico-kube-controllers
kube-system   deployment.apps/coredns                     1/1     1            1           30m   coredns                     easzlab.io.local:5000/coredns/coredns:1.9.3                 k8s-app=kube-dns
kube-system   deployment.apps/dashboard-metrics-scraper   1/1     1            1           30m   dashboard-metrics-scraper   easzlab.io.local:5000/kubernetesui/metrics-scraper:v1.0.8   k8s-app=dashboard-metrics-scraper
kube-system   deployment.apps/kubernetes-dashboard        1/1     1            1           30m   kubernetes-dashboard        easzlab.io.local:5000/kubernetesui/dashboard:v2.7.0         k8s-app=kubernetes-dashboard
kube-system   deployment.apps/metrics-server              1/1     1            1           30m   metrics-server              easzlab.io.local:5000/easzlab/metrics-server:v0.5.2         k8s-app=metrics-server

NAMESPACE     NAME                                                   DESIRED   CURRENT   READY   AGE   CONTAINERS                  IMAGES                                                      SELECTOR
kube-system   replicaset.apps/calico-kube-controllers-89b744d6c      1         1         1       31m   calico-kube-controllers     easzlab.io.local:5000/calico/kube-controllers:v3.23.5       k8s-app=calico-kube-controllers,pod-template-hash=89b744d6c
kube-system   replicaset.apps/coredns-6665999d97                     1         1         1       30m   coredns                     easzlab.io.local:5000/coredns/coredns:1.9.3                 k8s-app=kube-dns,pod-template-hash=6665999d97
kube-system   replicaset.apps/dashboard-metrics-scraper-57566685b4   1         1         1       30m   dashboard-metrics-scraper   easzlab.io.local:5000/kubernetesui/metrics-scraper:v1.0.8   k8s-app=dashboard-metrics-scraper,pod-template-hash=57566685b4
kube-system   replicaset.apps/kubernetes-dashboard-57db9bfd5b        1         1         1       30m   kubernetes-dashboard        easzlab.io.local:5000/kubernetesui/dashboard:v2.7.0         k8s-app=kubernetes-dashboard,pod-template-hash=57db9bfd5b
kube-system   replicaset.apps/metrics-server-6bd9f986fc              1         1         1       30m   metrics-server              easzlab.io.local:5000/easzlab/metrics-server:v0.5.2         k8s-app=metrics-server,pod-template-hash=6bd9f986fc

9.6 Check the iptables rules

[root@master01 ~]# iptables -nvL
Chain INPUT (policy ACCEPT 3506 packets, 440K bytes)
 pkts bytes target     prot opt in     out     source               destination         
 379K   55M cali-INPUT  all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:Cz_u1IQiXIMmKD4c */
    0     0 ACCEPT     udp  --  *      *       0.0.0.0/0            169.254.20.10        udp dpt:53
    0     0 ACCEPT     tcp  --  *      *       0.0.0.0/0            169.254.20.10        tcp dpt:53
 445K  305M KUBE-IPVS-FILTER  all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* kubernetes ipvs access filter */
 445K  305M KUBE-PROXY-FIREWALL  all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* kube-proxy firewall rules */
 445K  305M KUBE-NODE-PORT  all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* kubernetes health check rules */
 448K  306M KUBE-FIREWALL  all  --  *      *       0.0.0.0/0            0.0.0.0/0           

Chain FORWARD (policy ACCEPT 0 packets, 0 bytes)
 pkts bytes target     prot opt in     out     source               destination         
    0     0 cali-FORWARD  all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:wUHhoiAYhphO9Mso */
    0     0 KUBE-PROXY-FIREWALL  all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* kube-proxy firewall rules */
    0     0 KUBE-FORWARD  all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* kubernetes forwarding rules */
    0     0 ACCEPT     all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:S93hcgKJrXEqnTfs */ /* Policy explicitly accepted packet. */ mark match 0x10000/0x10000
    0     0 MARK       all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:mp77cMpurHhyjLrM */ MARK or 0x10000

Chain OUTPUT (policy ACCEPT 3527 packets, 441K bytes)
 pkts bytes target     prot opt in     out     source               destination         
 385K   55M cali-OUTPUT  all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:tVnHkvAo15HuiPy0 */
    0     0 ACCEPT     udp  --  *      *       169.254.20.10        0.0.0.0/0            udp spt:53
    0     0 ACCEPT     tcp  --  *      *       169.254.20.10        0.0.0.0/0            tcp spt:53
 452K   70M KUBE-FIREWALL  all  --  *      *       0.0.0.0/0            0.0.0.0/0           

Chain KUBE-FIREWALL (2 references)
 pkts bytes target     prot opt in     out     source               destination         
    0     0 DROP       all  --  *      *      !127.0.0.0/8          127.0.0.0/8          /* block incoming localnet connections */ ! ctstate RELATED,ESTABLISHED,DNAT
    0     0 DROP       all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* kubernetes firewall for dropping marked packets */ mark match 0x8000/0x8000

Chain KUBE-FORWARD (1 references)
 pkts bytes target     prot opt in     out     source               destination         
    0     0 ACCEPT     all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* kubernetes forwarding rules */ mark match 0x4000/0x4000
    0     0 ACCEPT     all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* kubernetes forwarding conntrack rule */ ctstate RELATED,ESTABLISHED

Chain KUBE-IPVS-FILTER (1 references)
 pkts bytes target     prot opt in     out     source               destination         
    0     0 RETURN     all  --  *      *       0.0.0.0/0            0.0.0.0/0            match-set KUBE-LOAD-BALANCER dst,dst
    0     0 RETURN     all  --  *      *       0.0.0.0/0            0.0.0.0/0            match-set KUBE-CLUSTER-IP dst,dst
    0     0 RETURN     all  --  *      *       0.0.0.0/0            0.0.0.0/0            match-set KUBE-EXTERNAL-IP dst,dst
    0     0 REJECT     all  --  *      *       0.0.0.0/0            0.0.0.0/0            ctstate NEW match-set KUBE-IPVS-IPS dst reject-with icmp-port-unreachable

Chain KUBE-KUBELET-CANARY (0 references)
 pkts bytes target     prot opt in     out     source               destination         

Chain KUBE-NODE-PORT (1 references)
 pkts bytes target     prot opt in     out     source               destination         
    0     0 ACCEPT     all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* Kubernetes health check node port */ match-set KUBE-HEALTH-CHECK-NODE-PORT dst

Chain KUBE-PROXY-FIREWALL (2 references)
 pkts bytes target     prot opt in     out     source               destination         

Chain KUBE-SOURCE-RANGES-FIREWALL (0 references)
 pkts bytes target     prot opt in     out     source               destination         
    0     0 DROP       all  --  *      *       0.0.0.0/0            0.0.0.0/0           

Chain cali-FORWARD (1 references)
 pkts bytes target     prot opt in     out     source               destination         
    0     0 MARK       all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:vjrMJCRpqwy5oRoX */ MARK and 0xfff1ffff
    0     0 cali-from-hep-forward  all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:A_sPAO0mcxbT9mOV */ mark match 0x0/0x10000
    0     0 cali-from-wl-dispatch  all  --  cali+  *       0.0.0.0/0            0.0.0.0/0            /* cali:8ZoYfO5HKXWbB3pk */
    0     0 cali-to-wl-dispatch  all  --  *      cali+   0.0.0.0/0            0.0.0.0/0            /* cali:jdEuaPBe14V2hutn */
    0     0 cali-to-hep-forward  all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:12bc6HljsMKsmfr- */
    0     0 cali-cidr-block  all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:NOSxoaGx8OIstr1z */

Chain cali-INPUT (1 references)
 pkts bytes target     prot opt in     out     source               destination         
 1850  593K ACCEPT     4    --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:PajejrV4aFdkZojI */ /* Allow IPIP packets from Calico hosts */ match-set cali40all-hosts-net src ADDRTYPE match dst-type LOCAL
    0     0 DROP       4    --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:_wjq-Yrma8Ly1Svo */ /* Drop IPIP packets from non-Calico hosts */
 417K  113M MARK       all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:ss8lEMQsXi-s6qYT */ MARK and 0xfffff
 417K  113M cali-forward-check  all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:PgIW-V0nEjwPhF_8 */
   13  1530 RETURN     all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:QMJlDwlS0OjHyfMN */ mark match ! 0x0/0xfff00000
    0     0 cali-wl-to-host  all  --  cali+  *       0.0.0.0/0            0.0.0.0/0           [goto]  /* cali:nDRe73txrna-aZjG */
    0     0 ACCEPT     all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:iX2AYvqGXaVqwkro */ mark match 0x10000/0x10000
 417K  113M MARK       all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:bhpnxD5IRtBP8KW0 */ MARK and 0xfff0ffff
 417K  113M cali-from-host-endpoint  all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:H5_bccAbHV0sooVy */
    0     0 ACCEPT     all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:inBL01YlfurT0dbI */ /* Host endpoint policy accepted packet. */ mark match 0x10000/0x10000

Chain cali-OUTPUT (1 references)
 pkts bytes target     prot opt in     out     source               destination         
    0     0 ACCEPT     all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:Mq1_rAdXXH3YkrzW */ mark match 0x10000/0x10000
    0     0 cali-forward-endpoint-mark  all  --  *      *       0.0.0.0/0            0.0.0.0/0           [goto]  /* cali:5Z67OUUpTOM7Xa1a */ mark match ! 0x0/0xfff00000
    0     0 RETURN     all  --  *      cali+   0.0.0.0/0            0.0.0.0/0            /* cali:M2Wf0OehNdig8MHR */
 2202  254K ACCEPT     4    --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:AJBkLho_0Qd8LNr3 */ /* Allow IPIP packets to other Calico hosts */ match-set cali40all-hosts-net dst ADDRTYPE match src-type LOCAL
 423K   63M MARK       all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:iz2RWXlXJDUfsLpe */ MARK and 0xfff0ffff
 423K   63M cali-to-host-endpoint  all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:xQqLi8S0sxbiyvjR */ ! ctstate DNAT
    0     0 ACCEPT     all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:aSnsxZdmhxm_ilRZ */ /* Host endpoint policy accepted packet. */ mark match 0x10000/0x10000

Chain cali-cidr-block (1 references)
 pkts bytes target     prot opt in     out     source               destination         

Chain cali-forward-check (1 references)
 pkts bytes target     prot opt in     out     source               destination         
 392K  111M RETURN     all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:Pbldlb4FaULvpdD8 */ ctstate RELATED,ESTABLISHED
    0     0 cali-set-endpoint-mark  tcp  --  *      *       0.0.0.0/0            0.0.0.0/0           [goto]  /* cali:ZD-6UxuUtGW-xtzg */ /* To kubernetes NodePort service */ multiport dports 30000:32767 match-set cali40this-host dst
    0     0 cali-set-endpoint-mark  udp  --  *      *       0.0.0.0/0            0.0.0.0/0           [goto]  /* cali:CbPfUajQ2bFVnDq4 */ /* To kubernetes NodePort service */ multiport dports 30000:32767 match-set cali40this-host dst
   13  1530 cali-set-endpoint-mark  all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:jmhU0ODogX-Zfe5g */ /* To kubernetes service */ ! match-set cali40this-host dst

Chain cali-forward-endpoint-mark (1 references)
 pkts bytes target     prot opt in     out     source               destination         
    0     0 cali-from-endpoint-mark  all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:O0SmFDrnm7KggWqW */ mark match ! 0x100000/0xfff00000
    0     0 cali-to-wl-dispatch  all  --  *      cali+   0.0.0.0/0            0.0.0.0/0            /* cali:aFl0WFKRxDqj8oA6 */
    0     0 cali-to-hep-forward  all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:AZKVrO3i_8cLai5f */
    0     0 MARK       all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:96HaP1sFtb-NYoYA */ MARK and 0xfffff
    0     0 ACCEPT     all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:VxO6hyNWz62YEtul */ /* Policy explicitly accepted packet. */ mark match 0x10000/0x10000

Chain cali-from-endpoint-mark (1 references)
 pkts bytes target     prot opt in     out     source               destination         
    0     0 DROP       all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:9dpftzl-pNycbr37 */ /* Unknown interface */

Chain cali-from-hep-forward (1 references)
 pkts bytes target     prot opt in     out     source               destination         

Chain cali-from-host-endpoint (1 references)
 pkts bytes target     prot opt in     out     source               destination         

Chain cali-from-wl-dispatch (2 references)
 pkts bytes target     prot opt in     out     source               destination         
    0     0 DROP       all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:zTj6P0TIgYvgz-md */ /* Unknown interface */

Chain cali-set-endpoint-mark (3 references)
 pkts bytes target     prot opt in     out     source               destination         
    0     0 DROP       all  --  cali+  *       0.0.0.0/0            0.0.0.0/0            /* cali:MN61lcxFj1yWuYBo */ /* Unknown endpoint */
   13  1530 MARK       all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:nKOjq8N2yzfmS3jk */ /* Non-Cali endpoint mark */ MARK xset 0x100000/0xfff00000

Chain cali-to-hep-forward (2 references)
 pkts bytes target     prot opt in     out     source               destination         

Chain cali-to-host-endpoint (1 references)
 pkts bytes target     prot opt in     out     source               destination         

Chain cali-to-wl-dispatch (2 references)
 pkts bytes target     prot opt in     out     source               destination         
    0     0 DROP       all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:7KNphB1nNHw80nIO */ /* Unknown interface */

Chain cali-wl-to-host (1 references)
 pkts bytes target     prot opt in     out     source               destination         
    0     0 cali-from-wl-dispatch  all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:Ee9Sbo10IpVujdIY */
    0     0 ACCEPT     all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:nSZbcOoG1xPONxb8 */ /* Configured DefaultEndpointToHostAction */

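The chain listing above includes per-chain packet/byte counters, which are handy for confirming that Calico is actually seeing traffic. As an optional check (these commands are not part of the original steps), you can inspect a single Calico chain with counters, list only the Calico chain definitions, or zero a chain's counters and re-check after generating traffic:

# show one Calico chain with packet/byte counters (filter table is the default)
iptables -v -n -L cali-INPUT
# list only the Calico chain definitions
iptables -S | grep '^-N cali-'
# reset the counters for a chain, then re-run the listing after some traffic
iptables -Z cali-INPUT
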
[root@master01 ~]# iptables-save 
# Generated by iptables-save v1.4.21 on Sun Feb 26 22:25:49 2023
*nat
:PREROUTING ACCEPT [9:540]
:INPUT ACCEPT [9:540]
:OUTPUT ACCEPT [10:600]
:POSTROUTING ACCEPT [10:600]
:KUBE-KUBELET-CANARY - [0:0]
:KUBE-LOAD-BALANCER - [0:0]
:KUBE-MARK-DROP - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-NODE-PORT - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-SERVICES - [0:0]
:cali-OUTPUT - [0:0]
:cali-POSTROUTING - [0:0]
:cali-PREROUTING - [0:0]
:cali-fip-dnat - [0:0]
:cali-fip-snat - [0:0]
:cali-nat-outgoing - [0:0]
-A PREROUTING -m comment --comment "cali:6gwbT8clXdHdC1b1" -j cali-PREROUTING
-A PREROUTING -m comment --comment "kubernetes service portals" -j KUBE-SERVICES
-A OUTPUT -m comment --comment "cali:tVnHkvAo15HuiPy0" -j cali-OUTPUT
-A OUTPUT -m comment --comment "kubernetes service portals" -j KUBE-SERVICES
-A POSTROUTING -m comment --comment "cali:O3lYWMrLQYEMJtB5" -j cali-POSTROUTING
-A POSTROUTING -m comment --comment "kubernetes postrouting rules" -j KUBE-POSTROUTING
-A KUBE-LOAD-BALANCER -j KUBE-MARK-MASQ
-A KUBE-MARK-DROP -j MARK --set-xmark 0x8000/0x8000
-A KUBE-MARK-MASQ -j MARK --set-xmark 0x4000/0x4000
-A KUBE-NODE-PORT -p tcp -m comment --comment "Kubernetes nodeport TCP port for masquerade purpose" -m set --match-set KUBE-NODE-PORT-TCP dst -j KUBE-MARK-MASQ
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --set-xmark 0x4000/0x0
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-SERVICES -s 127.0.0.0/8 -j RETURN
-A KUBE-SERVICES ! -s 172.20.0.0/16 -m comment --comment "Kubernetes service cluster ip + port for masquerade purpose" -m set --match-set KUBE-CLUSTER-IP dst,dst -j KUBE-MARK-MASQ
-A KUBE-SERVICES -m addrtype --dst-type LOCAL -j KUBE-NODE-PORT
-A KUBE-SERVICES -m set --match-set KUBE-CLUSTER-IP dst,dst -j ACCEPT
-A cali-OUTPUT -m comment --comment "cali:GBTAv2p5CwevEyJm" -j cali-fip-dnat
-A cali-POSTROUTING -m comment --comment "cali:Z-c7XtVd2Bq7s_hA" -j cali-fip-snat
-A cali-POSTROUTING -m comment --comment "cali:nYKhEzDlr11Jccal" -j cali-nat-outgoing
-A cali-POSTROUTING -o tunl0 -m comment --comment "cali:JHlpT-eSqR1TvyYm" -m addrtype ! --src-type LOCAL --limit-iface-out -m addrtype --src-type LOCAL -j MASQUERADE
-A cali-PREROUTING -m comment --comment "cali:r6XmIziWUJsdOK6Z" -j cali-fip-dnat
-A cali-nat-outgoing -m comment --comment "cali:Dw4T8UWPnCLxRJiI" -m set --match-set cali40masq-ipam-pools src -m set ! --match-set cali40all-ipam-pools dst -j MASQUERADE
COMMIT
# Completed on Sun Feb 26 22:25:49 2023
# Generated by iptables-save v1.4.21 on Sun Feb 26 22:25:49 2023
*filter
:INPUT ACCEPT [2625:408749]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [2655:385114]
:KUBE-FIREWALL - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-IPVS-FILTER - [0:0]
:KUBE-KUBELET-CANARY - [0:0]
:KUBE-NODE-PORT - [0:0]
:KUBE-PROXY-FIREWALL - [0:0]
:KUBE-SOURCE-RANGES-FIREWALL - [0:0]
:cali-FORWARD - [0:0]
:cali-INPUT - [0:0]
:cali-OUTPUT - [0:0]
:cali-cidr-block - [0:0]
:cali-forward-check - [0:0]
:cali-forward-endpoint-mark - [0:0]
:cali-from-endpoint-mark - [0:0]
:cali-from-hep-forward - [0:0]
:cali-from-host-endpoint - [0:0]
:cali-from-wl-dispatch - [0:0]
:cali-set-endpoint-mark - [0:0]
:cali-to-hep-forward - [0:0]
:cali-to-host-endpoint - [0:0]
:cali-to-wl-dispatch - [0:0]
:cali-wl-to-host - [0:0]
-A INPUT -m comment --comment "cali:Cz_u1IQiXIMmKD4c" -j cali-INPUT
-A INPUT -d 169.254.20.10/32 -p udp -m udp --dport 53 -j ACCEPT
-A INPUT -d 169.254.20.10/32 -p tcp -m tcp --dport 53 -j ACCEPT
-A INPUT -m comment --comment "kubernetes ipvs access filter" -j KUBE-IPVS-FILTER
-A INPUT -m comment --comment "kube-proxy firewall rules" -j KUBE-PROXY-FIREWALL
-A INPUT -m comment --comment "kubernetes health check rules" -j KUBE-NODE-PORT
-A INPUT -j KUBE-FIREWALL
-A FORWARD -m comment --comment "cali:wUHhoiAYhphO9Mso" -j cali-FORWARD
-A FORWARD -m comment --comment "kube-proxy firewall rules" -j KUBE-PROXY-FIREWALL
-A FORWARD -m comment --comment "kubernetes forwarding rules" -j KUBE-FORWARD
-A FORWARD -m comment --comment "cali:S93hcgKJrXEqnTfs" -m comment --comment "Policy explicitly accepted packet." -m mark --mark 0x10000/0x10000 -j ACCEPT
-A FORWARD -m comment --comment "cali:mp77cMpurHhyjLrM" -j MARK --set-xmark 0x10000/0x10000
-A OUTPUT -m comment --comment "cali:tVnHkvAo15HuiPy0" -j cali-OUTPUT
-A OUTPUT -s 169.254.20.10/32 -p udp -m udp --sport 53 -j ACCEPT
-A OUTPUT -s 169.254.20.10/32 -p tcp -m tcp --sport 53 -j ACCEPT
-A OUTPUT -j KUBE-FIREWALL
-A KUBE-FIREWALL ! -s 127.0.0.0/8 -d 127.0.0.0/8 -m comment --comment "block incoming localnet connections" -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
-A KUBE-FIREWALL -m comment --comment "kubernetes firewall for dropping marked packets" -m mark --mark 0x8000/0x8000 -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A KUBE-IPVS-FILTER -m set --match-set KUBE-LOAD-BALANCER dst,dst -j RETURN
-A KUBE-IPVS-FILTER -m set --match-set KUBE-CLUSTER-IP dst,dst -j RETURN
-A KUBE-IPVS-FILTER -m set --match-set KUBE-EXTERNAL-IP dst,dst -j RETURN
-A KUBE-IPVS-FILTER -m conntrack --ctstate NEW -m set --match-set KUBE-IPVS-IPS dst -j REJECT --reject-with icmp-port-unreachable
-A KUBE-NODE-PORT -m comment --comment "Kubernetes health check node port" -m set --match-set KUBE-HEALTH-CHECK-NODE-PORT dst -j ACCEPT
-A KUBE-SOURCE-RANGES-FIREWALL -j DROP
-A cali-FORWARD -m comment --comment "cali:vjrMJCRpqwy5oRoX" -j MARK --set-xmark 0x0/0xe0000
-A cali-FORWARD -m comment --comment "cali:A_sPAO0mcxbT9mOV" -m mark --mark 0x0/0x10000 -j cali-from-hep-forward
-A cali-FORWARD -i cali+ -m comment --comment "cali:8ZoYfO5HKXWbB3pk" -j cali-from-wl-dispatch
-A cali-FORWARD -o cali+ -m comment --comment "cali:jdEuaPBe14V2hutn" -j cali-to-wl-dispatch
-A cali-FORWARD -m comment --comment "cali:12bc6HljsMKsmfr-" -j cali-to-hep-forward
-A cali-FORWARD -m comment --comment "cali:NOSxoaGx8OIstr1z" -j cali-cidr-block
-A cali-INPUT -p ipv4 -m comment --comment "cali:PajejrV4aFdkZojI" -m comment --comment "Allow IPIP packets from Calico hosts" -m set --match-set cali40all-hosts-net src -m addrtype --dst-type LOCAL -j ACCEPT
-A cali-INPUT -p ipv4 -m comment --comment "cali:_wjq-Yrma8Ly1Svo" -m comment --comment "Drop IPIP packets from non-Calico hosts" -j DROP
-A cali-INPUT -m comment --comment "cali:ss8lEMQsXi-s6qYT" -j MARK --set-xmark 0x0/0xfff00000
-A cali-INPUT -m comment --comment "cali:PgIW-V0nEjwPhF_8" -j cali-forward-check
-A cali-INPUT -m comment --comment "cali:QMJlDwlS0OjHyfMN" -m mark ! --mark 0x0/0xfff00000 -j RETURN
-A cali-INPUT -i cali+ -m comment --comment "cali:nDRe73txrna-aZjG" -g cali-wl-to-host
-A cali-INPUT -m comment --comment "cali:iX2AYvqGXaVqwkro" -m mark --mark 0x10000/0x10000 -j ACCEPT
-A cali-INPUT -m comment --comment "cali:bhpnxD5IRtBP8KW0" -j MARK --set-xmark 0x0/0xf0000
-A cali-INPUT -m comment --comment "cali:H5_bccAbHV0sooVy" -j cali-from-host-endpoint
-A cali-INPUT -m comment --comment "cali:inBL01YlfurT0dbI" -m comment --comment "Host endpoint policy accepted packet." -m mark --mark 0x10000/0x10000 -j ACCEPT
-A cali-OUTPUT -m comment --comment "cali:Mq1_rAdXXH3YkrzW" -m mark --mark 0x10000/0x10000 -j ACCEPT
-A cali-OUTPUT -m comment --comment "cali:5Z67OUUpTOM7Xa1a" -m mark ! --mark 0x0/0xfff00000 -g cali-forward-endpoint-mark
-A cali-OUTPUT -o cali+ -m comment --comment "cali:M2Wf0OehNdig8MHR" -j RETURN
-A cali-OUTPUT -p ipv4 -m comment --comment "cali:AJBkLho_0Qd8LNr3" -m comment --comment "Allow IPIP packets to other Calico hosts" -m set --match-set cali40all-hosts-net dst -m addrtype --src-type LOCAL -j ACCEPT
-A cali-OUTPUT -m comment --comment "cali:iz2RWXlXJDUfsLpe" -j MARK --set-xmark 0x0/0xf0000
-A cali-OUTPUT -m comment --comment "cali:xQqLi8S0sxbiyvjR" -m conntrack ! --ctstate DNAT -j cali-to-host-endpoint
-A cali-OUTPUT -m comment --comment "cali:aSnsxZdmhxm_ilRZ" -m comment --comment "Host endpoint policy accepted packet." -m mark --mark 0x10000/0x10000 -j ACCEPT
-A cali-forward-check -m comment --comment "cali:Pbldlb4FaULvpdD8" -m conntrack --ctstate RELATED,ESTABLISHED -j RETURN
-A cali-forward-check -p tcp -m comment --comment "cali:ZD-6UxuUtGW-xtzg" -m comment --comment "To kubernetes NodePort service" -m multiport --dports 30000:32767 -m set --match-set cali40this-host dst -g cali-set-endpoint-mark
-A cali-forward-check -p udp -m comment --comment "cali:CbPfUajQ2bFVnDq4" -m comment --comment "To kubernetes NodePort service" -m multiport --dports 30000:32767 -m set --match-set cali40this-host dst -g cali-set-endpoint-mark
-A cali-forward-check -m comment --comment "cali:jmhU0ODogX-Zfe5g" -m comment --comment "To kubernetes service" -m set ! --match-set cali40this-host dst -j cali-set-endpoint-mark
-A cali-forward-endpoint-mark -m comment --comment "cali:O0SmFDrnm7KggWqW" -m mark ! --mark 0x100000/0xfff00000 -j cali-from-endpoint-mark
-A cali-forward-endpoint-mark -o cali+ -m comment --comment "cali:aFl0WFKRxDqj8oA6" -j cali-to-wl-dispatch
-A cali-forward-endpoint-mark -m comment --comment "cali:AZKVrO3i_8cLai5f" -j cali-to-hep-forward
-A cali-forward-endpoint-mark -m comment --comment "cali:96HaP1sFtb-NYoYA" -j MARK --set-xmark 0x0/0xfff00000
-A cali-forward-endpoint-mark -m comment --comment "cali:VxO6hyNWz62YEtul" -m comment --comment "Policy explicitly accepted packet." -m mark --mark 0x10000/0x10000 -j ACCEPT
-A cali-from-endpoint-mark -m comment --comment "cali:9dpftzl-pNycbr37" -m comment --comment "Unknown interface" -j DROP
-A cali-from-wl-dispatch -m comment --comment "cali:zTj6P0TIgYvgz-md" -m comment --comment "Unknown interface" -j DROP
-A cali-set-endpoint-mark -i cali+ -m comment --comment "cali:MN61lcxFj1yWuYBo" -m comment --comment "Unknown endpoint" -j DROP
-A cali-set-endpoint-mark -m comment --comment "cali:nKOjq8N2yzfmS3jk" -m comment --comment "Non-Cali endpoint mark" -j MARK --set-xmark 0x100000/0xfff00000
-A cali-to-wl-dispatch -m comment --comment "cali:7KNphB1nNHw80nIO" -m comment --comment "Unknown interface" -j DROP
-A cali-wl-to-host -m comment --comment "cali:Ee9Sbo10IpVujdIY" -j cali-from-wl-dispatch
-A cali-wl-to-host -m comment --comment "cali:nSZbcOoG1xPONxb8" -m comment --comment "Configured DefaultEndpointToHostAction" -j ACCEPT
COMMIT
# Completed on Sun Feb 26 22:25:49 2023
# Generated by iptables-save v1.4.21 on Sun Feb 26 22:25:49 2023
*mangle
:PREROUTING ACCEPT [24564:1944537]
:INPUT ACCEPT [423053:113720623]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [430259:64336196]
:POSTROUTING ACCEPT [429759:64307064]
:KUBE-IPTABLES-HINT - [0:0]
:KUBE-KUBELET-CANARY - [0:0]
:cali-POSTROUTING - [0:0]
:cali-PREROUTING - [0:0]
:cali-from-host-endpoint - [0:0]
:cali-to-host-endpoint - [0:0]
-A PREROUTING -m comment --comment "cali:6gwbT8clXdHdC1b1" -j cali-PREROUTING
-A POSTROUTING -m comment --comment "cali:O3lYWMrLQYEMJtB5" -j cali-POSTROUTING
-A cali-POSTROUTING -m comment --comment "cali:NX-7roTexQ3fGRfU" -m mark --mark 0x10000/0x10000 -j RETURN
-A cali-POSTROUTING -m comment --comment "cali:qaajsWArU1ku9saf" -m mark ! --mark 0x0/0xfff00000 -j RETURN
-A cali-POSTROUTING -m comment --comment "cali:N2faOPfc4DVQAfQj" -j MARK --set-xmark 0x0/0xf0000
-A cali-POSTROUTING -m comment --comment "cali:IR1ghU6yHNWsaaJF" -m conntrack --ctstate DNAT -j cali-to-host-endpoint
-A cali-POSTROUTING -m comment --comment "cali:fcjhvOBNywbfCkS2" -m comment --comment "Host endpoint policy accepted packet." -m mark --mark 0x10000/0x10000 -j RETURN
-A cali-PREROUTING -m comment --comment "cali:6BJqBjBC7crtA-7-" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A cali-PREROUTING -m comment --comment "cali:KX7AGNd6rMcDUai6" -m mark --mark 0x10000/0x10000 -j ACCEPT
-A cali-PREROUTING -m comment --comment "cali:wNH7KsA3ILKJBsY9" -j cali-from-host-endpoint
-A cali-PREROUTING -m comment --comment "cali:Cg96MgVuoPm7UMRo" -m comment --comment "Host endpoint policy accepted packet." -m mark --mark 0x10000/0x10000 -j ACCEPT
COMMIT
# Completed on Sun Feb 26 22:25:49 2023
# Generated by iptables-save v1.4.21 on Sun Feb 26 22:25:49 2023
*raw
:PREROUTING ACCEPT [384004:56010418]
:OUTPUT ACCEPT [389774:55632241]
:cali-OUTPUT - [0:0]
:cali-PREROUTING - [0:0]
:cali-from-host-endpoint - [0:0]
:cali-rpf-skip - [0:0]
:cali-to-host-endpoint - [0:0]
-A PREROUTING -m comment --comment "cali:6gwbT8clXdHdC1b1" -j cali-PREROUTING
-A PREROUTING -d 169.254.20.10/32 -p udp -m udp --dport 53 -j NOTRACK
-A PREROUTING -d 169.254.20.10/32 -p tcp -m tcp --dport 53 -j NOTRACK
-A PREROUTING -p udp -m udp --dport 123 -j NOTRACK
-A PREROUTING -p udp -m udp --dport 123 -j NOTRACK
-A PREROUTING -p udp -m udp --dport 123 -j NOTRACK
-A OUTPUT -m comment --comment "cali:tVnHkvAo15HuiPy0" -j cali-OUTPUT
-A OUTPUT -s 169.254.20.10/32 -p tcp -m tcp --sport 8080 -j NOTRACK
-A OUTPUT -d 169.254.20.10/32 -p tcp -m tcp --dport 8080 -j NOTRACK
-A OUTPUT -d 169.254.20.10/32 -p udp -m udp --dport 53 -j NOTRACK
-A OUTPUT -d 169.254.20.10/32 -p tcp -m tcp --dport 53 -j NOTRACK
-A OUTPUT -s 169.254.20.10/32 -p udp -m udp --sport 53 -j NOTRACK
-A OUTPUT -s 169.254.20.10/32 -p tcp -m tcp --sport 53 -j NOTRACK
-A OUTPUT -p udp -m udp --sport 123 -j NOTRACK
-A OUTPUT -p udp -m udp --sport 123 -j NOTRACK
-A OUTPUT -p udp -m udp --sport 123 -j NOTRACK
-A cali-OUTPUT -m comment --comment "cali:njdnLwYeGqBJyMxW" -j MARK --set-xmark 0x0/0xf0000
-A cali-OUTPUT -m comment --comment "cali:rz86uTUcEZAfFsh7" -j cali-to-host-endpoint
-A cali-OUTPUT -m comment --comment "cali:pN0F5zD0b8yf9W1Z" -m mark --mark 0x10000/0x10000 -j ACCEPT
-A cali-PREROUTING -m comment --comment "cali:XFX5xbM8B9qR10JG" -j MARK --set-xmark 0x0/0xf0000
-A cali-PREROUTING -i cali+ -m comment --comment "cali:EWMPb0zVROM-woQp" -j MARK --set-xmark 0x40000/0x40000
-A cali-PREROUTING -m comment --comment "cali:PWuxTAIaFCtsg5Qa" -m mark --mark 0x40000/0x40000 -j cali-rpf-skip
-A cali-PREROUTING -m comment --comment "cali:fSSbGND7dgyemWU7" -m mark --mark 0x40000/0x40000 -m rpfilter --validmark --invert -j DROP
-A cali-PREROUTING -m comment --comment "cali:ImU0-4Rl2WoOI9Ou" -m mark --mark 0x0/0x40000 -j cali-from-host-endpoint
-A cali-PREROUTING -m comment --comment "cali:lV4V2MPoMBf0hl9T" -m mark --mark 0x10000/0x10000 -j ACCEPT
COMMIT
# Completed on Sun Feb 26 22:25:49 2023
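
A dump this large is easier to reason about if you first confirm that both kube-proxy (KUBE-* chains) and Calico (cali-* chains) have programmed rules in each table. A quick, optional sanity check (not part of the original steps) is to count the chain declarations per component and pull out just the NAT rules for kubernetes services:

# count kube-proxy and Calico chain declarations across all tables
iptables-save | grep -c '^:KUBE-'
iptables-save | grep -c '^:cali-'
# show only the NAT rules added for kubernetes services
iptables-save -t nat | grep KUBE-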

9.7 View IPVS rules

Note: ipvsadm is not installed by default; install it first, then list the rules.

yum install -y ipvsadm
[root@master01 ~]# ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  169.254.20.10:32510 rr
  -> 172.20.235.3:8443            Masq    1      0          0         
TCP  172.20.241.64:32510 rr
  -> 172.20.235.3:8443            Masq    1      0          0         
TCP  192.168.30.16:32510 rr
  -> 172.20.235.3:8443            Masq    1      0          0         
TCP  10.68.0.1:443 rr
  -> 192.168.30.16:6443           Masq    1      0          0         
  -> 192.168.30.17:6443           Masq    1      0          0         
TCP  10.68.0.2:53 rr
  -> 172.20.235.1:53              Masq    1      0          0         
TCP  10.68.0.2:9153 rr
  -> 172.20.235.1:9153            Masq    1      0          0         
TCP  10.68.100.48:443 rr
  -> 172.20.235.2:4443            Masq    1      0          0         
TCP  10.68.114.28:53 rr
  -> 172.20.235.1:53              Masq    1      0          0         
TCP  10.68.185.194:8000 rr
  -> 172.20.235.4:8000            Masq    1      0          0         
TCP  10.68.250.76:443 rr
  -> 172.20.235.3:8443            Masq    1      0          0         
UDP  10.68.0.2:53 rr
  -> 172.20.235.1:53              Masq    1      0          0         
UDP  10.68.114.28:53 rr
  -> 172.20.235.1:53              Masq    1      0          0         
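
Each virtual server above should map back to a Kubernetes Service ClusterIP or NodePort; for example, 10.68.0.1:443 is the default kubernetes apiserver Service, forwarding to the two master nodes on 6443. As an optional cross-check (commands not in the original steps), compare the entries against the Service list or watch the live connection counters:

kubectl get svc -A -o wide
# per-virtual-server packet/byte statistics
ipvsadm -L -n --stats
# refresh the connection table every second
watch -n1 ipvsadm -L -n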

9.8 Check the /etc/hosts file on each machine

Note: run this check on every node.
The result is shown below. When pulling images from the local registry, use the domain easzlab.io.local.

cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.30.13    easzlab.io.local
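
Optionally, verify that easzlab.io.local resolves to the deploy node and that the local image registry answers. The registry port below (5000, the Docker registry default commonly used by kubeasz) is an assumption; adjust it if your installation differs:

getent hosts easzlab.io.local
# list repositories held by the local registry (port 5000 assumed)
curl -s http://easzlab.io.local:5000/v2/_catalog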

十、Log in to the dashboard

10.1 Get the token

kubectl describe secret `kubectl get secret -n kube-system|grep admin|awk '{print $1}'`  -n kube-system
Name:         admin-user
Namespace:    kube-system
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: admin-user
              kubernetes.io/service-account.uid: 5e38b37f-e239-4846-9c79-6336ae825db0

Type:  kubernetes.io/service-account-token

Data
====
ca.crt:     1310 bytes
namespace:  11 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6IkpqeGlWMlVSOUNmaHpuR2Z4NUhJQlJTVXc4WjVjekZCZ3o5Zm9MUGdRZHcifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI1ZTM4YjM3Zi1lMjM5LTQ4NDYtOWM3OS02MzM2YWU4MjVkYjAiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.hzzvHDjaO-zGoAIQpYHOCJB7CIiexj27ZBaalV71F22GPlT7zlcUhJ9c0AG92SKKruDW51P3xs9SSpjJoJbsjBug0nPVtMbATzCvcYSI1Oawh1H-fDZochTm8KibZNtgWpBklgWLEj6AicOMgqLKVp7crfdwLeEZQ4j9utlTf_X0P2QkXPnokaHBEt9zdcxAg5rr5APiXfOk9yDUJqXqfJjCd0Kk0ORnHyZ1nwZBUlPVIGMvn8StPI1pjlhRZLTkbkKesrb24hXh_CdHaY92ABKGHSDTksKX2JwPvlctNfNrurWaaLz2qFmHFTEn8ROv5HzS676rpbWbjVaWmXgtuA
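
Note: the command above relies on the ServiceAccount token Secret being created automatically. On Kubernetes 1.24 and later such Secrets are no longer auto-generated; in that case you can request a short-lived token directly (alternative shown for reference, assuming the admin-user ServiceAccount exists in kube-system as in the output above):

kubectl -n kube-system create token admin-user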

10.2 Get the login URL

kubectl get svc -A|grep kubernetes-dashboard|awk -F '[:| |/]+' '{ip="192.168.30.18"; print "Login URL: https://"ip":"$7}'
Login URL: https://192.168.30.18:32510
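
The awk expression simply extracts the NodePort from the PORT(S) column. An equivalent, more direct query (assuming the dashboard Service lives in the kube-system namespace, as kubeasz deploys it) uses jsonpath:

kubectl -n kube-system get svc kubernetes-dashboard -o jsonpath='{.spec.ports[0].nodePort}'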

Open the login URL https://192.168.30.18:32510 in a browser.
(Screenshot: Kubernetes dashboard login page)
Enter the token obtained in step 10.1 to log in.
