1. 环境信息

1.1. 系统及版本

[root@k8s-master01 ~]# cat /etc/redhat-release
Red Hat Enterprise Linux release 9.1 (Plow)
[root@k8s-master01 ~]#

1.2. 配置信息

序号  名称          IP地址         CPU/颗  内存/GB  硬盘/GB  描述
1     k8s-master01  192.168.1.11   4       4        100      kube-apiserver、kube-controller-manager、kube-scheduler、etcd、kubelet、kube-proxy、nfs-client、haproxy、keepalived、nginx
2     k8s-master02  192.168.1.12   4       4        100      kube-apiserver、kube-controller-manager、kube-scheduler、etcd、kubelet、kube-proxy、nfs-client、haproxy、keepalived、nginx
3     k8s-master03  192.168.1.13   4       4        100      kube-apiserver、kube-controller-manager、kube-scheduler、etcd、kubelet、kube-proxy、nfs-client、haproxy、keepalived、nginx
4     k8s-node01    192.168.1.21   4       4        100      kubelet、kube-proxy、nfs-client、nginx
5     k8s-node02    192.168.1.22   4       4        100      kubelet、kube-proxy、nfs-client、nginx
6     k8s-node03    192.168.1.23   4       4        100      kubelet、kube-proxy、nfs-client、nginx
7     VIP           192.168.1.51   -       -        -        浮动IP地址

1.3. 软件版本

序号  软件                                                                          版本                            描述
1     kernel                                                                        5.14.0-307.el9.x86_64
2     RedHat                                                                        9.1
3     kube-apiserver、kube-controller-manager、kube-scheduler、kubelet、kube-proxy  1.27.1
4     etcd                                                                          3.5.9
5     containerd                                                                    v1.7.1
6     cfssl                                                                         1.6.4
7     cfssljson                                                                     1.6.4
8     cni                                                                           1.3.0
9     crictl                                                                        1.27.0
10    haproxy                                                                       haproxy-2.4.17-6.el9.x86_64
11    keepalived                                                                    keepalived-2.2.4-6.el9.x86_64

1.4. 网络信息

序号  用途      网段              网关           描述
1     物理主机  192.168.1.0/24    192.168.1.1
2     service   192.168.200.0/22  192.168.200.1  自定义
3     POD       192.168.204.0/22  192.168.204.1  自定义

2. 系统配置

2.1. 配置IP地址

# 以 k8s-master01 为例,其余节点按 1.2 节的规划修改对应的 IP 与网卡名
[root@master ~]# nmcli connection modify ens16 ipv4.addresses 192.168.1.11/24
[root@master ~]# nmcli connection modify ens16 ipv4.gateway 192.168.1.1
[root@master ~]# nmcli connection modify ens16 ipv4.method manual
[root@master ~]# nmcli connection modify ens16 ipv4.dns 114.114.114.114
[root@master ~]# nmcli connection up ens16

2.2. 主机名配置

# 在对应的节点上分别执行
[root@master ~]# hostnamectl set-hostname k8s-master01
[root@master ~]# hostnamectl set-hostname k8s-master02
[root@master ~]# hostnamectl set-hostname k8s-master03
[root@master ~]# hostnamectl set-hostname k8s-node01
[root@master ~]# hostnamectl set-hostname k8s-node02
[root@master ~]# hostnamectl set-hostname k8s-node03

2.3. YUM源配置

[root@k8s-master01 yum.repos.d]# cd /etc/yum.repos.d/
[root@k8s-master01 yum.repos.d]# rm -rf * 
[root@k8s-master01 yum.repos.d]# cat > aliyun.repo << EOF
[aliyun-app]
name=aliyun-appsteam
baseurl=https://mirrors.aliyun.com/centos-stream/9-stream/AppStream/x86_64/os/
gpgcheck=0

[aliyun-base]
name=aliyun-baseos
baseurl=https://mirrors.aliyun.com/centos-stream/9-stream/BaseOS/x86_64/os/
gpgcheck=0

EOF
[root@k8s-master01 yum.repos.d]#
[root@k8s-master01 yum.repos.d]# yum install -y wget psmisc vim net-tools nfs-utils telnet yum-utils device-mapper-persistent-data lvm2 git tar curl gcc bash-completion chrony sshpass ipvsadm ipset sysstat conntrack libseccomp lm_sensors-libs ipset-libs
[root@k8s-master01 yum.repos.d]# yum clean all
[root@k8s-master01 yum.repos.d]# yum makecache

2.4. 软件下载

可通过以下链接下载所需软件,版本以 1.3 节为准。

https://github.com/containernetworking/plugins/releases/
https://github.com/containerd/containerd/releases/
https://github.com/kubernetes-sigs/cri-tools/releases/
https://github.com/Mirantis/cri-dockerd/releases/
https://github.com/etcd-io/etcd/releases/
https://github.com/cloudflare/cfssl/releases/
https://github.com/kubernetes/kubernetes/tree/master/CHANGELOG
https://download.docker.com/linux/static/stable/x86_64/
https://github.com/opencontainers/runc/releases/
https://mirrors.tuna.tsinghua.edu.cn/elrepo/kernel/el9/x86_64/RPMS/
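
下面给出一个按 1.3 节版本批量下载的参考脚本(仅作示意,具体文件名与下载地址请以各项目 Releases 页面的实际内容为准):

# 版本与 1.3 节保持一致,如有变动请自行调整
wget https://dl.k8s.io/v1.27.1/kubernetes-server-linux-amd64.tar.gz
wget https://github.com/etcd-io/etcd/releases/download/v3.5.9/etcd-v3.5.9-linux-amd64.tar.gz
wget https://github.com/containerd/containerd/releases/download/v1.7.1/cri-containerd-cni-1.7.1-linux-amd64.tar.gz
wget https://github.com/containernetworking/plugins/releases/download/v1.3.0/cni-plugins-linux-amd64-v1.3.0.tgz
wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.27.0/crictl-v1.27.0-linux-amd64.tar.gz
wget https://github.com/cloudflare/cfssl/releases/download/v1.6.4/cfssl_1.6.4_linux_amd64
wget https://github.com/cloudflare/cfssl/releases/download/v1.6.4/cfssljson_1.6.4_linux_amd64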

2.5. 系统配置

2.5.1. 关闭防火墙

[root@k8s-master01 ~]# systemctl disable --now firewalld
[root@k8s-master01 ~]# 

2.5.2. 关闭SELinux

[root@k8s-master01 ~]# setenforce 0
[root@k8s-master01 ~]# sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config

2.5.3. 关闭SWAP交换分区

[root@k8s-master01 ~]# sed -ri 's/.*swap.*/#&/' /etc/fstab
[root@k8s-master01 ~]# swapoff -a && sysctl -w vm.swappiness=0
[root@k8s-master01 ~]# cat /etc/fstab

#
# /etc/fstab
# Created by anaconda on Fri May  5 07:27:34 2023
#
# Accessible filesystems, by reference, are maintained under '/dev/disk/'.
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info.
#
# After editing this file, run 'systemctl daemon-reload' to update systemd
# units generated from this file.
#
/dev/mapper/rhel-root00 /                       ext4    defaults        1 1
UUID=43b614b8-111c-428e-995b-925cec978b0f /boot                   ext4    defaults        1 2
/dev/mapper/rhel-root   /home                   ext4    defaults        1 2
/dev/mapper/rhel-var    /var                    ext4    defaults        1 2
/dev/mapper/rhel-var_log /var/log                ext4    defaults        1 2
#/dev/mapper/rhel-swap   none                    swap    defaults        0 0
[root@k8s-master01 ~]#

2.5.4. 网络配置


[root@k8s-master01 ~]# cat > /etc/NetworkManager/conf.d/calico.conf << EOF 
[keyfile]
unmanaged-devices=interface-name:cali*;interface-name:tunl*
EOF
[root@k8s-master01 ~]# systemctl restart NetworkManager

2.5.5. 时钟同步

[root@k8s-master01 ~]# yum install chrony -y
[root@k8s-master01 ~]#  cat > /etc/chrony.conf << EOF 
pool 时钟服务器地址 iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
allow 192.168.1.0/24
local stratum 10
keyfile /etc/chrony.keys
leapsectz right/UTC
logdir /var/log/chrony
EOF

[root@k8s-master01 ~]#  systemctl restart chronyd ; systemctl enable chronyd
[root@k8s-master01 ~]# chronyc sources -v

  .-- Source mode  '^' = server, '=' = peer, '#' = local clock.
 / .- Source state '*' = current best, '+' = combined, '-' = not combined,
| /             'x' = may be in error, '~' = too variable, '?' = unusable.
||                                                 .- xxxx [ yyyy ] +/- zzzz
||      Reachability register (octal) -.           |  xxxx = adjusted offset,
||      Log2(Polling interval) --.      |          |  yyyy = measured offset,
||                                \     |          |  zzzz = estimated error.
||                                 |    |           \
MS Name/IP address         Stratum Poll Reach LastRx Last sample
===============================================================================
^* 192.168.10.10                  3  10   377  1004   -615us[ -725us] +/-  161ms
[root@k8s-master01 ~]# 

2.5.6. 配置ulimit

[root@k8s-master01 ~]# ulimit -SHn 65535
[root@k8s-master01 ~]# cat >> /etc/security/limits.conf <<EOF
* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF
[root@k8s-master01 ~]#

2.5.7. 配置免密登录(互信)

[root@k8s-master01 ~]# yum install -y sshpass
[root@k8s-master01 ~]# ssh-keygen -f /root/.ssh/id_rsa -P ''
[root@k8s-master01 ~]# export IP="192.168.1.11 192.168.1.12 192.168.1.13 192.168.1.21 192.168.1.22 192.168.1.23"
[root@k8s-master01 ~]# export SSHPASS=123123
[root@k8s-master01 ~]# for HOST in $IP;do sshpass -e ssh-copy-id -o StrictHostKeyChecking=no $HOST;done
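
配置完成后可以做一次简单验证(示意),能免密返回各节点主机名即说明互信已生效:

for HOST in $IP; do ssh -o BatchMode=yes $HOST hostname; done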

2.5.8. kernel升级

[root@k8s-master01 ~]# yum install https://www.elrepo.org/elrepo-release-9.el9.elrepo.noarch.rpm
[root@k8s-master01 ~]# sed -i "s@mirrorlist@#mirrorlist@g" /etc/yum.repos.d/elrepo.repo 
[root@k8s-master01 ~]# sed -i "s@elrepo.org/linux@mirrors.tuna.tsinghua.edu.cn/elrepo@g" /etc/yum.repos.d/elrepo.repo 
[root@k8s-master01 ~]# yum  --disablerepo="*"  --enablerepo="elrepo-kernel"  list  available
# 这里选择安装主线版本 kernel-ml;如需长期维护版本,可改为安装 kernel-lt
[root@k8s-master01 ~]# yum -y --enablerepo=elrepo-kernel  install  kernel-ml
[root@k8s-master01 ~]# rpm -qa | grep kernel
# 查看默认内核
[root@k8s-master01 ~]# grubby --default-kernel
# 若默认内核不是新安装的版本,使用以下命令设置
[root@k8s-master01 ~]# grubby --set-default $(ls /boot/vmlinuz-* | grep elrepo)
# 重启生效
[root@k8s-master01 ~]# reboot
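
重启后可按下面的方式确认当前运行内核与默认引导内核已切换为新版本(示意):

uname -r
grubby --default-kernel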

2.5.9. 模块加载

[root@k8s-master01 ~]# cat >> /etc/modules-load.d/ipvs.conf <<EOF 
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF
[root@k8s-master01 ~]# systemctl restart systemd-modules-load.service
[root@k8s-master01 ~]# lsmod | grep -e ip_vs -e nf_conntrack
nf_conntrack_netlink    57344  0
nfnetlink              20480  5 nft_compat,nf_conntrack_netlink,nf_tables,ip_set
ip_vs_sh               16384  0
ip_vs_wrr              16384  0
ip_vs_rr               16384  47
ip_vs                 204800  53 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack          188416  7 xt_conntrack,nf_nat,xt_nat,nf_conntrack_netlink,xt_CT,xt_MASQUERADE,ip_vs
nf_defrag_ipv6         24576  4 nf_conntrack,xt_socket,xt_TPROXY,ip_vs
nf_defrag_ipv4         16384  3 nf_conntrack,xt_socket,xt_TPROXY
libcrc32c              16384  4 nf_conntrack,nf_nat,nf_tables,ip_vs
[root@k8s-master01 ~]#

2.5.10. 修改内核参数

[root@k8s-master01 ~]# cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720

net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.ip_conntrack_max = 65536
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384

net.ipv6.conf.all.disable_ipv6 = 0
net.ipv6.conf.default.disable_ipv6 = 0
net.ipv6.conf.lo.disable_ipv6 = 0
net.ipv6.conf.all.forwarding = 1
EOF

[root@k8s-master01 ~]# sysctl --system

2.5.11. 配置hosts本地解析

[root@k8s-master01 ~]# cat >> /etc/hosts << EOF

192.168.1.11 k8s-master01
192.168.1.12 k8s-master02
192.168.1.13 k8s-master03
192.168.1.21 k8s-node01
192.168.1.22 k8s-node02
192.168.1.23 k8s-node03
192.168.1.51 lb-vip
EOF
[root@k8s-master01 ~]#
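
如果不想逐台编辑,也可以在 master01 上把 /etc/hosts 分发到其余节点(示意,前提是已完成 2.5.7 的免密配置):

for HOST in 192.168.1.12 192.168.1.13 192.168.1.21 192.168.1.22 192.168.1.23; do scp /etc/hosts $HOST:/etc/hosts; done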

3. 基本组件安装

3.1. 安装containerd作为Runtime(master、node节点都操作)

3.1.1. 将软件包发送到各个节点

[root@k8s-master01 ~]# grep k8s /etc/hosts | awk '{print $2}' | xargs -i scp cni-plugins-linux-amd64-v1.3.0.tgz root@{}:/root/  
[root@k8s-master01 ~]# grep k8s /etc/hosts | awk '{print $2}' | xargs -i scp crictl-v1.27.0-linux-amd64.tar.gz root@{}:/root/  
[root@k8s-master01 ~]# grep k8s /etc/hosts | awk '{print $2}' | xargs -i scp kubernetes-server-linux-amd64.tar.gz root@{}:/root/
[root@k8s-master01 ~]# grep k8s /etc/hosts | awk '{print $2}' | xargs -i scp cri-containerd-cni-1.7.1-linux-amd64.tar.gz root@{}:/root/   
[root@k8s-master01 ~]#

3.1.2. 开始安装

[root@k8s-master01 ~]# mkdir -pv /etc/cni/net.d /opt/cni/bin
[root@k8s-master01 ~]# tar -zxvf /root/cri-containerd-cni-1.7.1-linux-amd64.tar.gz -C /
[root@k8s-master01 ~]# tar -zxvf /root/cni-plugins-linux-amd64-v1.3.0.tgz -C /opt/cni/bin/

3.1.3. 创建服务启动文件

[root@k8s-master01 ~]# cat > /etc/systemd/system/containerd.service <<EOF
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target

[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/containerd
Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=infinity
TasksMax=infinity
OOMScoreAdjust=-999

[Install]
WantedBy=multi-user.target
EOF
[root@k8s-master01 ~]# 

3.1.4. containerd所需模块加载

[root@k8s-master01 ~]# cat <<EOF | sudo tee /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF
[root@k8s-master01 ~]# systemctl restart systemd-modules-load.service
[root@k8s-master01 ~]# systemctl status systemd-modules-load.service
● systemd-modules-load.service - Load Kernel Modules
     Loaded: loaded (/usr/lib/systemd/system/systemd-modules-load.service; static)
     Active: active (exited) since Thu 2023-06-01 10:31:08 CST; 24h ago
       Docs: man:systemd-modules-load.service(8)
             man:modules-load.d(5)
    Process: 612 ExecStart=/usr/lib/systemd/systemd-modules-load (code=exited, status=0/SUCCESS)
   Main PID: 612 (code=exited, status=0/SUCCESS)
        CPU: 316ms

Jun 01 10:31:08 k8s-master01 systemd-modules-load[612]: Inserted module 'ip_vs_rr'
Jun 01 10:31:08 k8s-master01 systemd-modules-load[612]: Inserted module 'ip_vs_wrr'
Jun 01 10:31:08 k8s-master01 systemd-modules-load[612]: Inserted module 'ip_vs_sh'
Jun 01 10:31:08 k8s-master01 systemd-modules-load[612]: Inserted module 'ip_tables'
Jun 01 10:31:08 k8s-master01 systemd-modules-load[612]: Inserted module 'ip_set'
Jun 01 10:31:08 k8s-master01 systemd-modules-load[612]: Inserted module 'xt_set'
Jun 01 10:31:08 k8s-master01 systemd-modules-load[612]: Inserted module 'ipt_rpfilter'
Jun 01 10:31:08 k8s-master01 systemd-modules-load[612]: Inserted module 'ipt_REJECT'
Jun 01 10:31:08 k8s-master01 systemd-modules-load[612]: Inserted module 'ipip'
Jun 01 10:31:08 k8s-master01 systemd[1]: Finished Load Kernel Modules.
[root@k8s-master01 ~]# cat /usr/lib/systemd/system/systemd-modules-load.service
#  SPDX-License-Identifier: LGPL-2.1-or-later
#
#  This file is part of systemd.
#
#  systemd is free software; you can redistribute it and/or modify it
#  under the terms of the GNU Lesser General Public License as published by
#  the Free Software Foundation; either version 2.1 of the License, or
#  (at your option) any later version.

[Unit]
Description=Load Kernel Modules
Documentation=man:systemd-modules-load.service(8) man:modules-load.d(5)
DefaultDependencies=no
Conflicts=shutdown.target
Before=sysinit.target shutdown.target
ConditionCapability=CAP_SYS_MODULE
ConditionDirectoryNotEmpty=|/lib/modules-load.d
ConditionDirectoryNotEmpty=|/usr/lib/modules-load.d
ConditionDirectoryNotEmpty=|/usr/local/lib/modules-load.d
ConditionDirectoryNotEmpty=|/etc/modules-load.d
ConditionDirectoryNotEmpty=|/run/modules-load.d
ConditionKernelCommandLine=|modules-load
ConditionKernelCommandLine=|rd.modules-load

[Service]
Type=oneshot
RemainAfterExit=yes
ExecStart=/usr/lib/systemd/systemd-modules-load
TimeoutSec=90s
[root@k8s-master01 ~]#

3.1.5. 配置containerd所需内核参数

[root@k8s-master01 ~]# cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables  = 1
net.ipv4.ip_forward                 = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF

# 加载内核
[root@k8s-master01 ~]# sysctl --system

3.1.6. 创建配置文件

生成配置文件后需要注意 sandbox_image 的镜像地址:默认的 registry.k8s.io 在国内环境往往无法拉取,需要按下面的方法改成可以正常下载的仓库地址。
可以先使用 docker search pause 进行查找,从结果中挑选一个国内可以正常拉取的仓库。

查找容器仓库后,这里选择了 dyrnq 的仓库,请根据实际查询到的结果自行选择。

[root@master ~]# docker search pause
NAME                                 DESCRIPTION                                     STARS     OFFICIAL   AUTOMATED
rancher/pause                                                                        6
rancher/pause-amd64                                                                  4
google/pause                                                                         6
kubernetes/pause                     restore from backup of kubernetes/pause         13
docker/desktop-kubernetes-pause      Mirror of selected tags from k8s.gcr.io/pause   1
almalinux/pause                                                                      0
ibmcom/pause                         Docker Image for IBM Cloud private-CE (Commu…   4
kubeedge/pause                       pause container image.                          1
pauseyyf/pause                                                                       0
igneoussystems/pause                 copy of kubernetes/pause                        0
ibmcom/pause-ppc64le                 Docker Image for IBM Cloud Private-CE (Commu…   1
pauseyop/pause                                                                       0
mirrorgooglecontainers/pause-amd64                                                   22
easzlab/pause-amd64                  from gcr.io/google_containers/pause-amd64       1
lc13579443/pause-amd64               pause-amd64                                     1                    [OK]
catalystcloud/pause                                                                  0
kubesphere/pause                                                                     2
wyliog/pause-amd64                                                                   0
dyrnq/pause                          registry.k8s.io/pause                           1
dlws/pause-amd64                                                                     0
e2eteam/pause                                                                        0
mirrorgooglecontainers/pause                                                         3
banzaicloud/pause                                                                    0
sapcc/pause-amd64                                                                    0
linode/pause                                                                         0
[root@master ~]#

配置文件

[root@k8s-master01 ~]# mkdir -pv /etc/containerd
mkdir: created directory '/etc/containerd'
[root@k8s-master01 ~]# containerd config default | tee /etc/containerd/config.toml
[root@k8s-master01 ~]# sed -i "s#SystemdCgroup\ \=\ false#SystemdCgroup\ \=\ true#g" /etc/containerd/config.toml
[root@k8s-master01 ~]# cat /etc/containerd/config.toml | grep SystemdCgroup
            SystemdCgroup = true
[root@k8s-master01 ~]# sed -i "s#registry.k8s.io#dyrnq#g" /etc/containerd/config.toml
[root@k8s-master01 ~]# cat /etc/containerd/config.toml | grep sandbox_image
    sandbox_image = "dyrnq/pause:3.8"
[root@k8s-master01 ~]# sed -i "s#config_path\ \=\ \"\"#config_path\ \=\ \"/etc/containerd/certs.d\"#g" /etc/containerd/config.toml
[root@k8s-master01 ~]# cat /etc/containerd/config.toml | grep certs.d
      config_path = "/etc/containerd/certs.d"
    config_path = "/etc/containerd/certs.d"
[root@k8s-master01 ~]# mkdir /etc/containerd/certs.d/docker.io -pv
mkdir: created directory '/etc/containerd/certs.d'
mkdir: created directory '/etc/containerd/certs.d/docker.io'
[root@k8s-master01 ~]# cat > /etc/containerd/certs.d/docker.io/hosts.toml << EOF
server = "https://docker.io"
[host."https://hub-mirror.c.163.com",host."https://l8bqm6qk.mirror.aliyuncs.com"]
   capabilities = ["pull", "resolve"]
EOF
[root@k8s-master01 ~]# systemctl daemon-reload
[root@k8s-master01 ~]# systemctl enable --now containerd
Created symlink /etc/systemd/system/multi-user.target.wants/containerd.service → /etc/systemd/system/containerd.service.
[root@k8s-master01 ~]# systemctl restart containerd
[root@k8s-master01 ~]# systemctl status containerd

3.1.7. 配置crictl客户端连接的运行时设置

[root@k8s-master01 ~]# tar xf /root/crictl-v*-linux-amd64.tar.gz -C /usr/bin/
[root@k8s-master01 ~]# cat > /etc/crictl.yaml <<EOF
> runtime-endpoint: unix:///run/containerd/containerd.sock
> image-endpoint: unix:///run/containerd/containerd.sock
> timeout: 10
> debug: false
> EOF
[root@k8s-master01 ~]# 
[root@k8s-master01 ~]# systemctl restart  containerd
[root@k8s-master01 ~]# crictl info
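
随后可以拉取一个测试镜像,验证镜像加速与 crictl 的运行时配置是否生效(示意,镜像仅作测试用途):

crictl pull docker.io/library/busybox:latest
crictl images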

3.2. k8s与etcd安装

3.3. 解压与安装(仅在master01上操作)

[root@k8s-master01 cby]# tar -xf kubernetes-server-linux-amd64.tar.gz  --strip-components=3 -C /usr/local/bin kubernetes/server/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy}
[root@k8s-master01 cby]#
[root@k8s-master01 cby]# tar -zxvf etcd-v3.5.9-linux-amd64.tar.gz  
[root@k8s-master01 cby]# mv etcd-v3.5.9-linux-amd64/etcd /usr/local/bin/
[root@k8s-master01 cby]# mv etcd-v3.5.9-linux-amd64/etcdctl  /usr/local/bin/
[root@k8s-master01 cby]# ls /usr/local/bin/
containerd       containerd-shim-runc-v1  containerd-stress  critest      ctr   etcdctl         kube-controller-manager  kubelet     kube-scheduler
containerd-shim  containerd-shim-runc-v2  crictl             ctd-decoder  etcd  kube-apiserver  kubectl                  kube-proxy
[root@k8s-master01 cby]# kubelet --version 
Kubernetes v1.27.1
[root@k8s-master01 cby]# etcdctl version 
etcdctl version: 3.5.9
API version: 3.5
[root@k8s-master01 cby]#

3.3.1. 发送到其他节点

[root@k8s-master01 cby]# Master='k8s-master02 k8s-master03'
[root@k8s-master01 cby]# Work='k8s-node01 k8s-node02 k8s-node03'
[root@k8s-master01 cby]# 
[root@k8s-master01 cby]# for NODE in $Master; do echo $NODE; scp /usr/local/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy} $NODE:/usr/local/bin/; scp /usr/local/bin/etcd* $NODE:/usr/local/bin/; done
[root@k8s-master01 cby]# for NODE in $Work; do     scp /usr/local/bin/kube{let,-proxy} $NODE:/usr/local/bin/ ; done

3.3.2. 创建证书相关文件

3.3.2.1. pki
[root@k8s-master01 kubernetes]# mkdir -pv pki
mkdir: created directory 'pki'
[root@k8s-master01 kubernetes]# cd pki
[root@k8s-master01 pki]# cat > admin-csr.json << EOF 
{
  "CN": "admin",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:masters",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF 
[root@k8s-master01 pki]# cat > ca-config.json << EOF 
{
  "signing": {
    "default": {
      "expiry": "876000h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ],
        "expiry": "876000h"
      }
    }
  }
}
EOF
[root@k8s-master01 pki]# cat > etcd-ca-csr.json  << EOF 
{
  "CN": "etcd",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "etcd",
      "OU": "Etcd Security"
    }
  ],
  "ca": {
    "expiry": "876000h"
  }
}
EOF
[root@k8s-master01 pki]# cat > front-proxy-ca-csr.json  << EOF 
{
  "CN": "kubernetes",
  "key": {
     "algo": "rsa",
     "size": 2048
  },
  "ca": {
    "expiry": "876000h"
  }
}
EOF
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# cat > kubelet-csr.json  << EOF 
{
  "CN": "system:node:\$NODE",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "ST": "Beijing",
      "O": "system:nodes",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF
[root@k8s-master01 pki]# cat > manager-csr.json << EOF 
{
  "CN": "system:kube-controller-manager",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:kube-controller-manager",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# cat > apiserver-csr.json << EOF 
{
  "CN": "kube-apiserver",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "Kubernetes",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# cat > ca-csr.json   << EOF 
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "Kubernetes",
      "OU": "Kubernetes-manual"
    }
  ],
  "ca": {
    "expiry": "876000h"
  }
}
EOF
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# cat > etcd-csr.json << EOF 
{
  "CN": "etcd",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "etcd",
      "OU": "Etcd Security"
    }
  ]
}
EOF
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# cat > front-proxy-client-csr.json  << EOF 
{
  "CN": "front-proxy-client",
  "key": {
     "algo": "rsa",
     "size": 2048
  }
}
EOF
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# cat > kube-proxy-csr.json  << EOF 
{
  "CN": "system:kube-proxy",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:kube-proxy",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# cat > scheduler-csr.json << EOF 
{
  "CN": "system:kube-scheduler",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:kube-scheduler",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF
[root@k8s-master01 pki]# 
3.3.2.2. bootstrap
[root@k8s-master01 kubernetes]# mkdir -pv bootstrap
mkdir: created directory 'bootstrap'
[root@k8s-master01 kubernetes]# cd bootstrap
[root@k8s-master01 bootstrap]# head -c 6 /dev/urandom | od -An -t x | tr -d ' '
c9ebc12f00009c35
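
bootstrap token 的完整格式为 6 位 token-id 加 16 位 token-secret(均为 [a-z0-9])。上面的命令生成的是 token-secret,token-id 也可以按同样思路生成,下面是一个示意写法:

# 示意:分别生成 6 位 token-id 与 16 位 token-secret
TOKEN_ID=$(head -c 3 /dev/urandom | od -An -tx1 | tr -d ' \n')
TOKEN_SECRET=$(head -c 8 /dev/urandom | od -An -tx1 | tr -d ' \n')
echo "${TOKEN_ID}.${TOKEN_SECRET}"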
[root@k8s-master01 bootstrap]# cat > bootstrap.secret.yaml << EOF 
apiVersion: v1
kind: Secret
metadata:
  name: bootstrap-token-c8ad9c
  namespace: kube-system
type: bootstrap.kubernetes.io/token
stringData:
  description: "The default bootstrap token generated by 'kubelet '."
  token-id: c8ad9c
  token-secret: c9ebc12f00009c35
  usage-bootstrap-authentication: "true"
  usage-bootstrap-signing: "true"
  auth-extra-groups:  system:bootstrappers:default-node-token,system:bootstrappers:worker,system:bootstrappers:ingress
 
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubelet-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node-bootstrapper
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-certificate-rotation
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:nodes
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
    verbs:
      - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kube-apiserver
EOF
[root@k8s-master01 bootstrap]# 
3.3.2.3. coredns
[root@k8s-master01 kubernetes]# mkdir -pv coredns
mkdir: created directory 'coredns'
[root@k8s-master01 kubernetes]# cd coredns
[root@k8s-master01 coredns]#  
[root@k8s-master01 coredns]# cat > coredns.yaml << EOF 
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
rules:
  - apiGroups:
    - ""
    resources:
    - endpoints
    - services
    - pods
    - namespaces
    verbs:
    - list
    - watch
  - apiGroups:
    - discovery.k8s.io
    resources:
    - endpointslices
    verbs:
    - list
    - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health {
          lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
          fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf {
          max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. Default is 1.
  # 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        kubernetes.io/os: linux
      affinity:
         podAntiAffinity:
           preferredDuringSchedulingIgnoredDuringExecution:
           - weight: 100
             podAffinityTerm:
               labelSelector:
                 matchExpressions:
                   - key: k8s-app
                     operator: In
                     values: ["kube-dns"]
               topologyKey: kubernetes.io/hostname
      containers:
      - name: coredns
        image: calico/coredns:v1.10.0
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 192.168.200.10
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
  
EOF
3.3.2.4. metrics-server
[root@k8s-master01 kubernetes]# mkdir -pv  metrics-server
mkdir: created directory 'metrics-server'
[root@k8s-master01 kubernetes]# cd metrics-server
[root@k8s-master01 metrics-server]# cat > metrics-server.yaml << EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-view: "true"
  name: system:aggregated-metrics-reader
rules:
- apiGroups:
  - metrics.k8s.io
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - nodes/stats
  - namespaces
  - configmaps
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  ports:
  - name: https
    port: 443
    protocol: TCP
    targetPort: https
  selector:
    k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  strategy:
    rollingUpdate:
      maxUnavailable: 0
  template:
    metadata:
      labels:
        k8s-app: metrics-server
    spec:
      containers:
      - args:
        - --cert-dir=/tmp
        - --secure-port=4443
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --kubelet-use-node-status-port
        - --metric-resolution=15s
        - --kubelet-insecure-tls
        - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem # change to front-proxy-ca.crt for kubeadm
        - --requestheader-username-headers=X-Remote-User
        - --requestheader-group-headers=X-Remote-Group
        - --requestheader-extra-headers-prefix=X-Remote-Extra-
        image: calico/metrics-server:v0.6.3
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /livez
            port: https
            scheme: HTTPS
          periodSeconds: 10
        name: metrics-server
        ports:
        - containerPort: 4443
          name: https
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /readyz
            port: https
            scheme: HTTPS
          initialDelaySeconds: 20
          periodSeconds: 10
        resources:
          requests:
            cpu: 100m
            memory: 200Mi
        securityContext:
          readOnlyRootFilesystem: true
          runAsNonRoot: true
          runAsUser: 1000
        volumeMounts:
        - mountPath: /tmp
          name: tmp-dir
        - name: ca-ssl
          mountPath: /etc/kubernetes/pki
      nodeSelector:
        kubernetes.io/os: linux
      priorityClassName: system-cluster-critical
      serviceAccountName: metrics-server
      volumes:
      - emptyDir: {}
        name: tmp-dir
      - name: ca-ssl
        hostPath:
          path: /etc/kubernetes/pki

---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  labels:
    k8s-app: metrics-server
  name: v1beta1.metrics.k8s.io
spec:
  group: metrics.k8s.io
  groupPriorityMinimum: 100
  insecureSkipTLSVerify: true
  service:
    name: metrics-server
    namespace: kube-system
  version: v1beta1
  versionPriority: 100
  
EOF

4. 证书生成工具

在master01上进行操作

[root@k8s-master01 cby]# cp cfssl_1.6.4_linux_amd64 /usr/local/bin/cfssl
[root@k8s-master01 cby]# cp cfssljson_1.6.4_linux_amd64 /usr/local/bin/cfssljson
[root@k8s-master01 cby]# cp cfssl-certinfo_1.6.4_linux_amd64 /usr/local/bin/cfssl-certinfo
[root@k8s-master01 cby]# chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson /usr/local/bin/cfssl-certinfo 
[root@k8s-master01 cby]# cfssl version 
Version: 1.6.4
Runtime: go1.18
[root@k8s-master01 metrics-server]# cfssljson -version
Version: 1.6.4
Runtime: go1.18

4.1. 生成etcd证书

4.1.1. 在所有master节点上操作

在master01上生成etcd证书

  • 生成etcd证书和对应的key(如果以后可能扩容,可以在 -hostname 参数里多预留几个 IP)
[root@k8s-master01 cby]# mkdir /etc/etcd/ssl -pv
mkdir: created directory '/etc/etcd'
mkdir: created directory '/etc/etcd/ssl'
[root@k8s-master01 cby]# 
[root@k8s-master01 cby]# cd ..
[root@k8s-master01 kubernetes]# ls
bootstrap  cby     coredns       csi-hostpath  ingress-yaml            metrics-server  snapshotter
calico     cilium  coredns-yaml  dashboard     kubeadm-metrics-server  pki
[root@k8s-master01 kubernetes]# cd pki/
[root@k8s-master01 pki]# ls 
admin-csr.json      ca-config.json  etcd-ca-csr.json  front-proxy-ca-csr.json      kubelet-csr.json     manager-csr.json
apiserver-csr.json  ca-csr.json     etcd-csr.json     front-proxy-client-csr.json  kube-proxy-csr.json  scheduler-csr.json
[root@k8s-master01 pki]# cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare /etc/etcd/ssl/etcd-ca
2023/05/30 16:06:27 [INFO] generating a new CA key and certificate from CSR
2023/05/30 16:06:27 [INFO] generate received request
2023/05/30 16:06:27 [INFO] received CSR
2023/05/30 16:06:27 [INFO] generating key: rsa-2048
2023/05/30 16:06:27 [INFO] encoded CSR
2023/05/30 16:06:27 [INFO] signed certificate with serial number 70528097076440131624359059234566212793915173444
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# cfssl gencert \
>    -ca=/etc/etcd/ssl/etcd-ca.pem \
>    -ca-key=/etc/etcd/ssl/etcd-ca-key.pem \
>    -config=ca-config.json \
>    -hostname=127.0.0.1,k8s-master01,k8s-master02,k8s-master03,192.168.1.11,192.168.1.12,192.168.1.13,192.168.1.14,192.168.1.15 \
>    -profile=kubernetes \
>    etcd-csr.json | cfssljson -bare /etc/etcd/ssl/etcd
2023/05/30 16:09:16 [INFO] generate received request
2023/05/30 16:09:16 [INFO] received CSR
2023/05/30 16:09:16 [INFO] generating key: rsa-2048
2023/05/30 16:09:16 [INFO] encoded CSR
2023/05/30 16:09:16 [INFO] signed certificate with serial number 248129067188441735912739568354554758895102402787
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# 
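
签发完成后,可以检查证书的 SAN 中是否包含全部 master 的主机名与 IP(示意):

openssl x509 -in /etc/etcd/ssl/etcd.pem -noout -text | grep -A1 "Subject Alternative Name"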

4.1.2. 拷贝到其他master节点

[root@k8s-master01 pki]# Master='k8s-master02 k8s-master03'
[root@k8s-master01 pki]# for NODE in $Master; do ssh $NODE "mkdir -p /etc/etcd/ssl"; for FILE in etcd-ca-key.pem  etcd-ca.pem  etcd-key.pem  etcd.pem; do scp /etc/etcd/ssl/${FILE} $NODE:/etc/etcd/ssl/${FILE}; done; done  
[root@k8s-master01 pki]#

4.2. 生成k8s相关证书

4.2.1. 在所有master、node节点上创建存放目录

[root@k8s-master01 pki]# mkdir -pv /etc/kubernetes/pki
mkdir: created directory '/etc/kubernetes'
mkdir: created directory '/etc/kubernetes/pki'
[root@k8s-master01 pki]# 

4.2.2. 在master01上生成k8s证书

[root@k8s-master01 pki]# cfssl gencert -initca ca-csr.json | cfssljson -bare /etc/kubernetes/pki/ca
2023/05/30 16:10:46 [INFO] generating a new CA key and certificate from CSR
2023/05/30 16:10:46 [INFO] generate received request
2023/05/30 16:10:46 [INFO] received CSR
2023/05/30 16:10:46 [INFO] generating key: rsa-2048
2023/05/30 16:10:46 [INFO] encoded CSR
2023/05/30 16:10:46 [INFO] signed certificate with serial number 574071817798203017627587862320271427815435810683
[root@k8s-master01 pki]# 

# 生成 apiserver 证书,这里多写了一些 IP 作为预留,为将来添加节点做准备
# 192.168.200.1 是 service 网段的第一个地址,需要按网段计算得出;192.168.1.51 为高可用 VIP 地址

[root@k8s-master01 pki]# cfssl gencert   \
> -ca=/etc/kubernetes/pki/ca.pem   \
> -ca-key=/etc/kubernetes/pki/ca-key.pem   \
> -config=ca-config.json   \
> -hostname=192.168.200.1,192.168.1.51,127.0.0.1,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.default.svc.cluster.local,x.oiox.cn,k.oiox.cn,l.oiox.cn,o.oiox.cn,192.168.1.11,192.168.1.12,192.168.1.13,192.168.1.14,192.168.1.15,192.168.1.21,192.168.1.22,192.168.1.23,192.168.1.24,192.168.1.25   \
> -profile=kubernetes   apiserver-csr.json | cfssljson -bare /etc/kubernetes/pki/apiserver
2023/05/30 16:13:35 [INFO] generate received request
2023/05/30 16:13:35 [INFO] received CSR
2023/05/30 16:13:35 [INFO] generating key: rsa-2048
2023/05/30 16:13:36 [INFO] encoded CSR
2023/05/30 16:13:36 [INFO] signed certificate with serial number 658274562220013957688165987048475892160489307457
[root@k8s-master01 pki]# 
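
上面注释中提到 192.168.200.1 是 service 网段(192.168.200.0/22)的第一个可用地址,可以用下面的方式计算确认(示意,假设节点上已安装 python3):

python3 -c 'import ipaddress; print(ipaddress.ip_network("192.168.200.0/22")[1])'
# 输出应为 192.168.200.1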

4.2.3. 生成apiserver聚合证书

[root@k8s-master01 pki]# cfssl gencert -initca front-proxy-ca-csr.json  | cfssljson -bare /etc/kubernetes/pki/front-proxy-ca
2023/05/30 16:14:48 [INFO] generating a new CA key and certificate from CSR
2023/05/30 16:14:48 [INFO] generate received request
2023/05/30 16:14:48 [INFO] received CSR
2023/05/30 16:14:48 [INFO] generating key: rsa-2048
2023/05/30 16:14:48 [INFO] encoded CSR
2023/05/30 16:14:48 [INFO] signed certificate with serial number 8630292230465875024539278408893194099227737614
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# cfssl gencert  \
> -ca=/etc/kubernetes/pki/front-proxy-ca.pem   \
> -ca-key=/etc/kubernetes/pki/front-proxy-ca-key.pem   \
> -config=ca-config.json   \
> -profile=kubernetes   front-proxy-client-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-client
2023/05/30 16:15:05 [INFO] generate received request
2023/05/30 16:15:05 [INFO] received CSR
2023/05/30 16:15:05 [INFO] generating key: rsa-2048
2023/05/30 16:15:05 [INFO] encoded CSR
2023/05/30 16:15:05 [INFO] signed certificate with serial number 303669102784616323062144323184496796745641083627
2023/05/30 16:15:05 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@k8s-master01 pki]# 

4.2.4. 生成controller-manager的证书

在后面的步骤中会选择使用哪种高可用方案,有两种可选方案。

  • haproxy、keepalived 那么为 --server=https://192.168.1.51:8443
  • nginx方案,那么为 --server=https://127.0.0.1:8443
[root@k8s-master01 pki]# cfssl gencert \
>    -ca=/etc/kubernetes/pki/ca.pem \
>    -ca-key=/etc/kubernetes/pki/ca-key.pem \
>    -config=ca-config.json \
>    -profile=kubernetes  manager-csr.json | cfssljson -bare /etc/kubernetes/pki/controller-manager
2023/05/30 16:18:00 [INFO] generate received request
2023/05/30 16:18:00 [INFO] received CSR
2023/05/30 16:18:00 [INFO] generating key: rsa-2048
2023/05/30 16:18:00 [INFO] encoded CSR
2023/05/30 16:18:00 [INFO] signed certificate with serial number 155976945894188104386508452003412264154900957782
2023/05/30 16:18:00 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@k8s-master01 pki]# 

# 若使用 haproxy、keepalived 那么为 `--server=https://192.168.1.51:8443`
# 若使用 nginx方案,那么为 `--server=https://127.0.0.1:8443`

[root@k8s-master01 pki]#   kubectl config set-cluster kubernetes \
>      --certificate-authority=/etc/kubernetes/pki/ca.pem \
>      --embed-certs=true \
>      --server=https://192.168.1.51:8443 \
>      --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
Cluster "kubernetes" set.
[root@k8s-master01 pki]# 

# 设置一个环境项,一个上下文

[root@k8s-master01 pki]# kubectl config set-context system:kube-controller-manager@kubernetes \
>     --cluster=kubernetes \
>     --user=system:kube-controller-manager \
>     --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
Context "system:kube-controller-manager@kubernetes" created.
[root@k8s-master01 pki]# 

# 设置一个用户项

[root@k8s-master01 pki]# kubectl config set-credentials system:kube-controller-manager \
>      --client-certificate=/etc/kubernetes/pki/controller-manager.pem \
>      --client-key=/etc/kubernetes/pki/controller-manager-key.pem \
>      --embed-certs=true \
>      --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
User "system:kube-controller-manager" set.
[root@k8s-master01 pki]# 

# 设置默认环境

[root@k8s-master01 pki]# kubectl config use-context system:kube-controller-manager@kubernetes \
>      --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
Switched to context "system:kube-controller-manager@kubernetes".
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# cfssl gencert \
>    -ca=/etc/kubernetes/pki/ca.pem \
>    -ca-key=/etc/kubernetes/pki/ca-key.pem \
>    -config=ca-config.json \
>    -profile=kubernetes \
>    scheduler-csr.json | cfssljson -bare /etc/kubernetes/pki/scheduler
2023/05/30 16:21:15 [INFO] generate received request
2023/05/30 16:21:15 [INFO] received CSR
2023/05/30 16:21:15 [INFO] generating key: rsa-2048
2023/05/30 16:21:16 [INFO] encoded CSR
2023/05/30 16:21:16 [INFO] signed certificate with serial number 513889285908077142167858220511113123828996219720
2023/05/30 16:21:16 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@k8s-master01 pki]#

4.2.5. 生成scheduler证书

  • 使用 haproxy、keepalived 那么为 --server=https://192.168.1.51:8443
  • 使用 nginx方案,那么为 --server=https://127.0.0.1:8443
 [root@k8s-master01 pki]# kubectl config set-cluster kubernetes \
>      --certificate-authority=/etc/kubernetes/pki/ca.pem \
>      --embed-certs=true \
>      --server=https://192.168.1.51:8443 \
>      --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
Cluster "kubernetes" set.
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# kubectl config set-credentials system:kube-scheduler \
>      --client-certificate=/etc/kubernetes/pki/scheduler.pem \
>      --client-key=/etc/kubernetes/pki/scheduler-key.pem \
>      --embed-certs=true \
>      --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
User "system:kube-scheduler" set.
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# kubectl config set-context system:kube-scheduler@kubernetes \
>      --cluster=kubernetes \
>      --user=system:kube-scheduler \
>      --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
Context "system:kube-scheduler@kubernetes" created.
[root@k8s-master01 pki]# 

[root@k8s-master01 pki]# kubectl config use-context system:kube-scheduler@kubernetes \
>      --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
Switched to context "system:kube-scheduler@kubernetes".
[root@k8s-master01 pki]#

[root@k8s-master01 pki]# cfssl gencert \
>    -ca=/etc/kubernetes/pki/ca.pem \
>    -ca-key=/etc/kubernetes/pki/ca-key.pem \
>    -config=ca-config.json \
>    -profile=kubernetes \
>    admin-csr.json | cfssljson -bare /etc/kubernetes/pki/admin
2023/05/30 16:26:11 [INFO] generate received request
2023/05/30 16:26:11 [INFO] received CSR
2023/05/30 16:26:11 [INFO] generating key: rsa-2048
2023/05/30 16:26:11 [INFO] encoded CSR
2023/05/30 16:26:11 [INFO] signed certificate with serial number 486734444855951345260685497901930525877841269948
2023/05/30 16:26:11 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@k8s-master01 pki]# 

4.2.6. 生成admin证书

  • 使用 haproxy、keepalived 那么为 --server=https://192.168.1.51:8443
  • 使用 nginx方案,那么为 --server=https://127.0.0.1:8443
[root@k8s-master01 pki]# kubectl config set-cluster kubernetes     \
>   --certificate-authority=/etc/kubernetes/pki/ca.pem     \
>   --embed-certs=true     \
>   --server=https://192.168.1.51:8443     \
>   --kubeconfig=/etc/kubernetes/admin.kubeconfig
Cluster "kubernetes" set.
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# kubectl config set-credentials kubernetes-admin  \
>   --client-certificate=/etc/kubernetes/pki/admin.pem     \
>   --client-key=/etc/kubernetes/pki/admin-key.pem     \
>   --embed-certs=true     \
>   --kubeconfig=/etc/kubernetes/admin.kubeconfig
User "kubernetes-admin" set.
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# kubectl config set-context kubernetes-admin@kubernetes    \
>   --cluster=kubernetes     \
>   --user=kubernetes-admin     \
>   --kubeconfig=/etc/kubernetes/admin.kubeconfig
Context "kubernetes-admin@kubernetes" created.
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# kubectl config use-context kubernetes-admin@kubernetes  --kubeconfig=/etc/kubernetes/admin.kubeconfig
Switched to context "kubernetes-admin@kubernetes".
[root@k8s-master01 pki]# 

4.2.7. 创建kube-proxy证书

[root@k8s-master01 pki]# cfssl gencert \
>    -ca=/etc/kubernetes/pki/ca.pem \
>    -ca-key=/etc/kubernetes/pki/ca-key.pem \
>    -config=ca-config.json \
>    -profile=kubernetes \
>    kube-proxy-csr.json | cfssljson -bare /etc/kubernetes/pki/kube-proxy
2023/05/30 16:31:22 [INFO] generate received request
2023/05/30 16:31:22 [INFO] received CSR
2023/05/30 16:31:22 [INFO] generating key: rsa-2048
2023/05/30 16:31:22 [INFO] encoded CSR
2023/05/30 16:31:22 [INFO] signed certificate with serial number 35526724972729304166330688726371526418998211500
2023/05/30 16:31:22 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# kubectl config set-cluster kubernetes     \
>   --certificate-authority=/etc/kubernetes/pki/ca.pem     \
>   --embed-certs=true     \
>   --server=https://192.168.1.51:8443     \
>   --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
Cluster "kubernetes" set.
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# kubectl config set-credentials kube-proxy  \
>   --client-certificate=/etc/kubernetes/pki/kube-proxy.pem     \
>   --client-key=/etc/kubernetes/pki/kube-proxy-key.pem     \
>   --embed-certs=true     \
>   --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
User "kube-proxy" set.
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# kubectl config set-context kube-proxy@kubernetes    \
>   --cluster=kubernetes     \
>   --user=kube-proxy     \
>   --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
Context "kube-proxy@kubernetes" created.
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# kubectl config use-context kube-proxy@kubernetes  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
Switched to context "kube-proxy@kubernetes".
[root@k8s-master01 pki]# 

4.2.8. 创建ServiceAccount Key – secret

[root@k8s-master01 pki]# openssl genrsa -out /etc/kubernetes/pki/sa.key 2048
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# 
[root@k8s-master01 pki]# openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub
writing RSA key
[root@k8s-master01 pki]#

4.2.9. 将证书发送到其他的master节点

[root@k8s-master01 pki]# mkdir  /etc/kubernetes/pki/ -p
[root@k8s-master01 pki]# for NODE in k8s-master02 k8s-master03; do  for FILE in $(ls /etc/kubernetes/pki | grep -v etcd); do  scp /etc/kubernetes/pki/${FILE} $NODE:/etc/kubernetes/pki/${FILE}; done;  for FILE in admin.kubeconfig controller-manager.kubeconfig scheduler.kubeconfig; do  scp /etc/kubernetes/${FILE} $NODE:/etc/kubernetes/${FILE}; done; done
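
分发完成后,可以在 master01 上远程确认其他 master 节点的证书数量与本机一致(示意):

for NODE in k8s-master02 k8s-master03; do echo $NODE; ssh $NODE "ls /etc/kubernetes/pki | wc -l"; done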

4.2.10. 查看证书

[root@k8s-master01 pki]# ls /etc/kubernetes/pki/
admin.csr      apiserver-key.pem  ca.pem                      front-proxy-ca.csr      front-proxy-client-key.pem  kube-proxy.pem  scheduler-key.pem
admin-key.pem  apiserver.pem      controller-manager.csr      front-proxy-ca-key.pem  front-proxy-client.pem      sa.key          scheduler.pem
admin.pem      ca.csr             controller-manager-key.pem  front-proxy-ca.pem      kube-proxy.csr              sa.pub
apiserver.csr  ca-key.pem         controller-manager.pem      front-proxy-client.csr  kube-proxy-key.pem          scheduler.csr
[root@k8s-master01 pki]# ls /etc/kubernetes/pki/ |wc -l
26
[root@k8s-master01 pki]#

5. k8s组件配置

5.1. etcd配置

5.1.1. master01配置

cat > /etc/etcd/etcd.config.yml << EOF 
name: 'k8s-master01'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.1.11:2380'
listen-client-urls: 'https://192.168.1.11:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.1.11:2380'
advertise-client-urls: 'https://192.168.1.11:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'k8s-master01=https://192.168.1.11:2380,k8s-master02=https://192.168.1.12:2380,k8s-master03=https://192.168.1.13:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF

5.1.2. master02配置

cat > /etc/etcd/etcd.config.yml << EOF
name: 'k8s-master02'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.1.12:2380'
listen-client-urls: 'https://192.168.1.12:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.1.12:2380'
advertise-client-urls: 'https://192.168.1.12:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'k8s-master01=https://192.168.1.11:2380,k8s-master02=https://192.168.1.12:2380,k8s-master03=https://192.168.1.13:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF

5.1.3. master03配置

cat > /etc/etcd/etcd.config.yml << EOF
name: 'k8s-master03'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.1.13:2380'
listen-client-urls: 'https://192.168.1.13:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.1.13:2380'
advertise-client-urls: 'https://192.168.1.13:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'k8s-master01=https://192.168.1.11:2380,k8s-master02=https://192.168.1.12:2380,k8s-master03=https://192.168.1.13:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF

5.2. Create the etcd service (all master nodes)

5.2.1. Create the service unit

cat > /usr/lib/systemd/system/etcd.service << EOF

[Unit]
Description=Etcd Service
Documentation=https://coreos.com/etcd/docs/latest/
After=network.target

[Service]
Type=notify
ExecStart=/usr/local/bin/etcd --config-file=/etc/etcd/etcd.config.yml
Restart=on-failure
RestartSec=10
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
Alias=etcd3.service

EOF

5.2.2. Create the etcd certificate directory and start etcd

[root@k8s-master01 pki]# mkdir -pv /etc/kubernetes/pki/etcd
mkdir: created directory '/etc/kubernetes/pki/etcd'
[root@k8s-master01 pki]# ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/
[root@k8s-master01 pki]# systemctl daemon-reload
[root@k8s-master01 pki]# systemctl enable --now etcd
Created symlink /etc/systemd/system/etcd3.service → /usr/lib/systemd/system/etcd.service.
Created symlink /etc/systemd/system/multi-user.target.wants/etcd.service → /usr/lib/systemd/system/etcd.service.
[root@k8s-master01 pki]#

5.2.3. Check etcd status

[root@k8s-master01 pki]# export ETCDCTL_API=3;etcdctl --endpoints="192.168.1.13:2379,192.168.1.12:2379,192.168.1.11:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem  endpoint status --write-out=table
+-------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
|     ENDPOINT      |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+-------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| 192.168.1.13:2379 | 40ba37809e1a423f |   3.5.9 |   20 kB |     false |      false |         2 |          9 |                  9 |        |
| 192.168.1.12:2379 |  ac7e57d44f030e8 |   3.5.9 |   20 kB |     false |      false |         2 |          9 |                  9 |        |
| 192.168.1.11:2379 | ace8d5b0766b3d92 |   3.5.9 |   20 kB |      true |      false |         2 |          9 |                  9 |        |
+-------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
[root@k8s-master01 pki]# 
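
In addition to the status table, a quick health probe confirms that every member is answering requests (a minimal check, reusing the same certificates as above):

export ETCDCTL_API=3
etcdctl --endpoints="192.168.1.13:2379,192.168.1.12:2379,192.168.1.11:2379" \
  --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem \
  --cert=/etc/kubernetes/pki/etcd/etcd.pem \
  --key=/etc/kubernetes/pki/etcd/etcd-key.pem \
  endpoint health --write-out=table
# Every endpoint should report HEALTH=true; fix any error here before continuing.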

6. High-availability configuration

  • With the nginx option, kubeconfigs later point at --server=https://127.0.0.1:8443
  • With the haproxy + keepalived option, they point at the VIP: --server=https://192.168.1.51:8443

6.1. NGINX high-availability option

Run on all nodes.

# Install the build toolchain
yum install gcc -y

# Download and unpack the nginx source tarball
# wget http://nginx.org/download/nginx-1.22.1.tar.gz
tar xvf nginx-*.tar.gz
cd nginx-*

# Build
./configure --with-stream --without-http --without-http_uwsgi_module --without-http_scgi_module --without-http_fastcgi_module
make && make install 

# Copy the compiled nginx to the other nodes
node='k8s-master02 k8s-master03 k8s-node01 k8s-node02 k8s-node03'
for NODE in $node; do scp -r /usr/local/nginx/ $NODE:/usr/local/nginx/; done

6.1.1. Write the configuration and service unit

# Write the nginx configuration file
cat > /usr/local/nginx/conf/kube-nginx.conf <<"EOF"
worker_processes 1;
events {
    worker_connections  1024;
}
stream {
    upstream backend {
        hash $remote_addr consistent;
        server 192.168.1.11:6443        max_fails=3 fail_timeout=30s;
        server 192.168.1.12:6443        max_fails=3 fail_timeout=30s;
        server 192.168.1.13:6443        max_fails=3 fail_timeout=30s;
    }
    server {
        listen 127.0.0.1:8443;
        proxy_connect_timeout 1s;
        proxy_pass backend;
    }
}
EOF

# Write the systemd unit file
cat > /etc/systemd/system/kube-nginx.service <<EOF
[Unit]
Description=kube-apiserver nginx proxy
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=forking
ExecStartPre=/usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/kube-nginx.conf -p /usr/local/nginx -t
ExecStart=/usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/kube-nginx.conf -p /usr/local/nginx
ExecReload=/usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/kube-nginx.conf -p /usr/local/nginx -s reload
PrivateTmp=true
Restart=always
RestartSec=5
StartLimitInterval=0
LimitNOFILE=65536
 
[Install]
WantedBy=multi-user.target
EOF

# Enable at boot and start
systemctl enable --now  kube-nginx 
systemctl restart kube-nginx
systemctl status kube-nginx
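
To verify the proxy, check that nginx is listening on the loopback port (the apiservers are not deployed yet, so only the listening socket can be confirmed at this stage):

# Expect a LISTEN entry for 127.0.0.1:8443 owned by nginx
ss -lntp | grep 8443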

6.2. keepalived + haproxy high-availability option

6.2.1. Install keepalived and haproxy

In this environment the master nodes are reused as the high-availability nodes, so keepalived and haproxy are installed and deployed on them.

[root@k8s-master01 pki]# yum -y install keepalived haproxy
Updating Subscription Management repositories.
Unable to read consumer identity

This system is not registered with an entitlement server. You can use subscription-manager to register.

Last metadata expiration check: 3:01:36 ago on Tue 30 May 2023 01:57:40 PM CST.
Dependencies resolved.
=====================================================================================================================================================
 Package                                        Architecture               Version                              Repository                      Size
=====================================================================================================================================================
Installing:
 haproxy                                        x86_64                     2.4.17-6.el9                         aliyun-app                     2.2 M
 keepalived                                     x86_64                     2.2.4-6.el9                          aliyun-app                     548 k
Installing dependencies:
 mariadb-connector-c                            x86_64                     3.2.6-1.el9                          aliyun-app                     198 k
 mariadb-connector-c-config                     noarch                     3.2.6-1.el9                          aliyun-app                      11 k
 net-snmp-agent-libs                            x86_64                     1:5.9.1-9.el9                        aliyun-app                     694 k
 net-snmp-libs                                  x86_64                     1:5.9.1-9.el9                        aliyun-app                     756 k

Transaction Summary
=====================================================================================================================================================
Install  6 Packages

Total download size: 4.3 M
Installed size: 14 M
Downloading Packages:
(1/6): mariadb-connector-c-3.2.6-1.el9.x86_64.rpm                                                                    286 kB/s | 198 kB     00:00    
(2/6): mariadb-connector-c-config-3.2.6-1.el9.noarch.rpm                                                              69 kB/s |  11 kB     00:00    
(3/6): keepalived-2.2.4-6.el9.x86_64.rpm                                                                             354 kB/s | 548 kB     00:01    
(4/6): net-snmp-agent-libs-5.9.1-9.el9.x86_64.rpm                                                                    458 kB/s | 694 kB     00:01    
(5/6): net-snmp-libs-5.9.1-9.el9.x86_64.rpm                                                                          496 kB/s | 756 kB     00:01    
(6/6): haproxy-2.4.17-6.el9.x86_64.rpm                                                                               432 kB/s | 2.2 MB     00:05    
-----------------------------------------------------------------------------------------------------------------------------------------------------
Total                                                                                                                860 kB/s | 4.3 MB     00:05     
Running transaction check
Transaction check succeeded.
Running transaction test
Transaction test succeeded.
Running transaction
  Preparing        :                                                                                                                             1/1 
  Installing       : net-snmp-libs-1:5.9.1-9.el9.x86_64                                                                                          1/6 
  Installing       : mariadb-connector-c-config-3.2.6-1.el9.noarch                                                                               2/6 
  Installing       : mariadb-connector-c-3.2.6-1.el9.x86_64                                                                                      3/6 
  Installing       : net-snmp-agent-libs-1:5.9.1-9.el9.x86_64                                                                                    4/6 
  Installing       : keepalived-2.2.4-6.el9.x86_64                                                                                               5/6 
  Running scriptlet: keepalived-2.2.4-6.el9.x86_64                                                                                               5/6 
  Running scriptlet: haproxy-2.4.17-6.el9.x86_64                                                                                                 6/6 
  Installing       : haproxy-2.4.17-6.el9.x86_64                                                                                                 6/6 
  Running scriptlet: haproxy-2.4.17-6.el9.x86_64                                                                                                 6/6 
  Verifying        : haproxy-2.4.17-6.el9.x86_64                                                                                                 1/6 
  Verifying        : keepalived-2.2.4-6.el9.x86_64                                                                                               2/6 
  Verifying        : mariadb-connector-c-3.2.6-1.el9.x86_64                                                                                      3/6 
  Verifying        : mariadb-connector-c-config-3.2.6-1.el9.noarch                                                                               4/6 
  Verifying        : net-snmp-agent-libs-1:5.9.1-9.el9.x86_64                                                                                    5/6 
  Verifying        : net-snmp-libs-1:5.9.1-9.el9.x86_64                                                                                          6/6 
Installed products updated.

Installed:
  haproxy-2.4.17-6.el9.x86_64                          keepalived-2.2.4-6.el9.x86_64                   mariadb-connector-c-3.2.6-1.el9.x86_64       
  mariadb-connector-c-config-3.2.6-1.el9.noarch        net-snmp-agent-libs-1:5.9.1-9.el9.x86_64        net-snmp-libs-1:5.9.1-9.el9.x86_64           

Complete!
[root@k8s-master01 pki]#

6.2.2. Edit the haproxy configuration (identical on all nodes)

# cp /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.bak

cat >/etc/haproxy/haproxy.cfg<<"EOF"
global
 maxconn 2000
 ulimit-n 16384
 log 127.0.0.1 local0 err
 stats timeout 30s

defaults
 log global
 mode http
 option httplog
 timeout connect 5000
 timeout client 50000
 timeout server 50000
 timeout http-request 15s
 timeout http-keep-alive 15s


frontend monitor-in
 bind *:33305
 mode http
 option httplog
 monitor-uri /monitor

frontend k8s-master
 bind 0.0.0.0:8443
 bind 127.0.0.1:8443
 mode tcp
 option tcplog
 tcp-request inspect-delay 5s
 default_backend k8s-master


backend k8s-master
 mode tcp
 option tcplog
 option tcp-check
 balance roundrobin
 default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
 server  k8s-master01  192.168.1.11:6443 check
 server  k8s-master02  192.168.1.12:6443 check
 server  k8s-master03  192.168.1.13:6443 check
EOF

6.2.3. Configure keepalived on master01 (MASTER node)

cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived

global_defs {
    router_id LVS_DEVEL
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state MASTER  # MASTER on the primary node, BACKUP on the standbys; only one MASTER may exist per environment
    interface ens16 # NIC name
    mcast_src_ip 192.168.1.11 # address of this node
    virtual_router_id 51 # virtual router ID; must be unique on the network segment and identical on every node of this cluster
    priority 100 # priority; must differ between the nodes
    nopreempt
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.1.51 # floating (virtual) IP
    }
    track_script {
      chk_apiserver
    }
}
EOF

6.2.4. Configure keepalived on master02

cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived

global_defs {
    router_id LVS_DEVEL
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2
    rise 1

}
vrrp_instance VI_1 {
    state BACKUP
    interface ens16
    mcast_src_ip 192.168.1.12
    virtual_router_id 51
    priority 80
    nopreempt
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.1.51
    }
    track_script {
      chk_apiserver
    }
}

EOF

6.2.5. Configure keepalived on master03

cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived

global_defs {
    router_id LVS_DEVEL
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2
    rise 1

}
vrrp_instance VI_1 {
    state BACKUP
    interface ens16
    mcast_src_ip 192.168.1.13
    virtual_router_id 51
    priority 50
    nopreempt
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.1.51
    }
    track_script {
      chk_apiserver
    }
}

EOF

6.2.6. Health-check script (haproxy/keepalived nodes)

cat >  /etc/keepalived/check_apiserver.sh << EOF
#!/bin/bash

err=0
for k in \$(seq 1 3)
do
    check_code=\$(pgrep haproxy)
    if [[ \$check_code == "" ]]; then
        err=\$(expr \$err + 1)
        sleep 1
        continue
    else
        err=0
        break
    fi
done

if [[ \$err != "0" ]]; then
    echo "systemctl stop keepalived"
    /usr/bin/systemctl stop keepalived
    exit 1
else
    exit 0
fi
EOF

# Make the script executable

chmod +x /etc/keepalived/check_apiserver.sh

6.2.7. Start the services

[root@k8s-master01 pki]# systemctl daemon-reload
[root@k8s-master01 pki]# systemctl enable --now haproxy
Created symlink /etc/systemd/system/multi-user.target.wants/haproxy.service → /usr/lib/systemd/system/haproxy.service.
[root@k8s-master01 pki]# systemctl enable --now keepalived
Created symlink /etc/systemd/system/multi-user.target.wants/keepalived.service → /usr/lib/systemd/system/keepalived.service.
[root@k8s-master01 pki]# 

6.2.8. Test high availability

[root@k8s-master01 pki]# ping 192.168.1.51
PING 192.168.1.51 (192.168.1.51) 56(84) bytes of data.
64 bytes from 192.168.1.51: icmp_seq=1 ttl=64 time=0.045 ms
^C
--- 192.168.1.51 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.045/0.045/0.045/0.000 ms
[root@k8s-master01 pki]#
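
A simple failover test, assuming master01 currently holds the VIP: stopping haproxy there makes check_apiserver.sh stop keepalived, and the VIP should move to master02 within a few seconds (depending on the nopreempt setting it may stay on master02 after master01 recovers):

# On master01: simulate a failure
systemctl stop haproxy
# On master02: the VIP should now be attached to ens16
ip addr show ens16 | grep 192.168.1.51
# On master01: restore both services afterwards
systemctl start haproxy keepalived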

7. Kubernetes component configuration

Create the following directories on all nodes:

[root@k8s-master01 pki]# mkdir -pv /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes
mkdir: created directory '/etc/kubernetes/manifests/'
mkdir: created directory '/var/lib/kubelet'
[root@k8s-master01 pki]#

7.1. Create kube-apiserver (all master nodes)

7.1.1. master01 configuration

cat > /usr/lib/systemd/system/kube-apiserver.service << EOF

[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-apiserver \
      --v=2  \
      --allow-privileged=true  \
      --bind-address=0.0.0.0  \
      --secure-port=6443  \
      --advertise-address=192.168.1.11 \
      --service-cluster-ip-range=192.168.200.0/22  \
      --service-node-port-range=30000-32767  \
      --etcd-servers=https://192.168.1.11:2379,https://192.168.1.12:2379,https://192.168.1.13:2379 \
      --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem  \
      --etcd-certfile=/etc/etcd/ssl/etcd.pem  \
      --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem  \
      --client-ca-file=/etc/kubernetes/pki/ca.pem  \
      --tls-cert-file=/etc/kubernetes/pki/apiserver.pem  \
      --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem  \
      --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem  \
      --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem  \
      --service-account-key-file=/etc/kubernetes/pki/sa.pub  \
      --service-account-signing-key-file=/etc/kubernetes/pki/sa.key  \
      --service-account-issuer=https://kubernetes.default.svc.cluster.local \
      --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname  \
      --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota  \
      --authorization-mode=Node,RBAC  \
      --enable-bootstrap-token-auth=true  \
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem  \
      --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem  \
      --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem  \
      --requestheader-allowed-names=aggregator  \
      --requestheader-group-headers=X-Remote-Group  \
      --requestheader-extra-headers-prefix=X-Remote-Extra-  \
      --requestheader-username-headers=X-Remote-User \
      --enable-aggregator-routing=true
      # --feature-gates=IPv6DualStack=true
      # --token-auth-file=/etc/kubernetes/token.csv

Restart=on-failure
RestartSec=10s
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target

EOF

7.1.2. master02 configuration

cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-apiserver \
      --v=2  \
      --allow-privileged=true  \
      --bind-address=0.0.0.0  \
      --secure-port=6443  \
      --advertise-address=192.168.1.12 \
      --service-cluster-ip-range=192.168.200.0/22  \
      --service-node-port-range=30000-32767  \
      --etcd-servers=https://192.168.1.11:2379,https://192.168.1.12:2379,https://192.168.1.13:2379 \
      --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem  \
      --etcd-certfile=/etc/etcd/ssl/etcd.pem  \
      --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem  \
      --client-ca-file=/etc/kubernetes/pki/ca.pem  \
      --tls-cert-file=/etc/kubernetes/pki/apiserver.pem  \
      --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem  \
      --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem  \
      --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem  \
      --service-account-key-file=/etc/kubernetes/pki/sa.pub  \
      --service-account-signing-key-file=/etc/kubernetes/pki/sa.key  \
      --service-account-issuer=https://kubernetes.default.svc.cluster.local \
      --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname  \
      --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota  \
      --authorization-mode=Node,RBAC  \
      --enable-bootstrap-token-auth=true  \
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem  \
      --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem  \
      --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem  \
      --requestheader-allowed-names=aggregator  \
      --requestheader-group-headers=X-Remote-Group  \
      --requestheader-extra-headers-prefix=X-Remote-Extra-  \
      --requestheader-username-headers=X-Remote-User \
      --enable-aggregator-routing=true
      # --feature-gates=IPv6DualStack=true
      # --token-auth-file=/etc/kubernetes/token.csv

Restart=on-failure
RestartSec=10s
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target

EOF

7.1.3. master03 configuration

cat > /usr/lib/systemd/system/kube-apiserver.service << EOF

[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-apiserver \
      --v=2  \
      --allow-privileged=true  \
      --bind-address=0.0.0.0  \
      --secure-port=6443  \
      --advertise-address=192.168.1.13 \
      --service-cluster-ip-range=192.168.200.0/22  \
      --service-node-port-range=30000-32767  \
      --etcd-servers=https://192.168.1.11:2379,https://192.168.1.12:2379,https://192.168.1.13:2379 \
      --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem  \
      --etcd-certfile=/etc/etcd/ssl/etcd.pem  \
      --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem  \
      --client-ca-file=/etc/kubernetes/pki/ca.pem  \
      --tls-cert-file=/etc/kubernetes/pki/apiserver.pem  \
      --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem  \
      --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem  \
      --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem  \
      --service-account-key-file=/etc/kubernetes/pki/sa.pub  \
      --service-account-signing-key-file=/etc/kubernetes/pki/sa.key  \
      --service-account-issuer=https://kubernetes.default.svc.cluster.local \
      --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname  \
      --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota  \
      --authorization-mode=Node,RBAC  \
      --enable-bootstrap-token-auth=true  \
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem  \
      --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem  \
      --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem  \
      --requestheader-allowed-names=aggregator  \
      --requestheader-group-headers=X-Remote-Group  \
      --requestheader-extra-headers-prefix=X-Remote-Extra-  \
      --requestheader-username-headers=X-Remote-User \
      --enable-aggregator-routing=true
      # --feature-gates=IPv6DualStack=true
      # --token-auth-file=/etc/kubernetes/token.csv

Restart=on-failure
RestartSec=10s
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target

EOF

7.1.4. Start kube-apiserver (all master nodes)

[root@k8s-master01 pki]# systemctl daemon-reload && systemctl enable --now kube-apiserver
Created symlink /etc/systemd/system/multi-user.target.wants/kube-apiserver.service → /usr/lib/systemd/system/kube-apiserver.service.
[root@k8s-master01 pki]# systemctl status kube-apiserver
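
A quick liveness check against each apiserver is possible even before kubectl is configured; /livez is readable without credentials under the default RBAC bindings (assuming anonymous auth is left at its default):

# Repeat for 192.168.1.12 and 192.168.1.13; each should return "ok"
curl -k https://192.168.1.11:6443/livez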

7.2. Configure the kube-controller-manager service

  • Configure on all master nodes; the configuration is identical
  • 192.168.204.0/22 is the pod CIDR; adjust it to your own addressing plan
cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-controller-manager \
      --v=2 \
      --bind-address=0.0.0.0 \
      --root-ca-file=/etc/kubernetes/pki/ca.pem \
      --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \
      --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \
      --service-account-private-key-file=/etc/kubernetes/pki/sa.key \
      --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \
      --leader-elect=true \
      --use-service-account-credentials=true \
      --node-monitor-grace-period=40s \
      --node-monitor-period=5s \
      --controllers=*,bootstrapsigner,tokencleaner \
      --allocate-node-cidrs=true \
      --service-cluster-ip-range=192.168.200.0/22 \
      --cluster-cidr=192.168.204.0/22 \
      --node-cidr-mask-size-ipv4=24 \
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem
      # --feature-gates=IPv6DualStack=true

Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target

EOF

7.2.1. Start kube-controller-manager and check its status

[root@k8s-master01 pki]# systemctl daemon-reload;systemctl enable --now kube-controller-manager;systemctl  status kube-controller-manager

7.3. Configure the kube-scheduler service

7.3.1. Configure on all master nodes (identical configuration)

cat > /usr/lib/systemd/system/kube-scheduler.service << EOF

[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-scheduler \\
      --v=2 \\
      --bind-address=0.0.0.0 \\
      --leader-elect=true \\
      --kubeconfig=/etc/kubernetes/scheduler.kubeconfig

Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target

EOF

7.3.2. Start the service and check its status

[root@k8s-master01 pki]# systemctl daemon-reload;systemctl enable --now kube-scheduler; systemctl status kube-scheduler

8. TLS Bootstrapping

8.1. Configure on master01

  • With the haproxy + keepalived option, use the VIP: --server=https://192.168.1.51:8443
  • With the nginx option, use --server=https://127.0.0.1:8443
[root@k8s-master01 kubernetes]# cd bootstrap
[root@k8s-master01 bootstrap]# kubectl config set-cluster kubernetes     \
 --certificate-authority=/etc/kubernetes/pki/ca.pem     \
 --embed-certs=true     --server=https://192.168.1.51:8443     \
 --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
Cluster "kubernetes" set.
[root@k8s-master01 bootstrap]#

# The token is defined in bootstrap.secret.yaml; if you need a different one, change it in that file
 
[root@k8s-master01 bootstrap]# kubectl config set-credentials tls-bootstrap-token-user     \
 --token=c8ad9c.c9ebc12f00009c35 \
 --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
User "tls-bootstrap-token-user" set.
[root@k8s-master01 bootstrap]# 
[root@k8s-master01 bootstrap]# 
[root@k8s-master01 bootstrap]# 
[root@k8s-master01 bootstrap]# 
[root@k8s-master01 bootstrap]# kubectl config set-context tls-bootstrap-token-user@kubernetes     \
 --cluster=kubernetes     \
 --user=tls-bootstrap-token-user     \
 --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
Context "tls-bootstrap-token-user@kubernetes" created.
[root@k8s-master01 bootstrap]# 
[root@k8s-master01 bootstrap]# 
[root@k8s-master01 bootstrap]# 
[root@k8s-master01 bootstrap]# kubectl config use-context tls-bootstrap-token-user@kubernetes     \
 --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
Switched to context "tls-bootstrap-token-user@kubernetes".
[root@k8s-master01 bootstrap]# 

# Copy /etc/kubernetes/admin.kubeconfig to /root/.kube/ and rename it to config; otherwise kubectl will not be able to reach the cluster.
# To use kubectl on the other master nodes, copy this config file to them as well.
[root@k8s-master01 bootstrap]# mkdir -pv /root/.kube ; cp /etc/kubernetes/admin.kubeconfig /root/.kube/config
mkdir: created directory '/root/.kube'
[root@k8s-master01 bootstrap]# 

[root@k8s-master01 .kube]# scp /etc/kubernetes/admin.kubeconfig root@k8s-master02:/root/.kube/config
[root@k8s-master01 .kube]# scp /etc/kubernetes/admin.kubeconfig root@k8s-master03:/root/.kube/config

8.2. Check the cluster status; continue only if everything is healthy

[root@k8s-master01 bootstrap]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE                         ERROR
controller-manager   Healthy   ok                              
scheduler            Healthy   ok                              
etcd-2               Healthy   {"health":"true","reason":""}   
etcd-0               Healthy   {"health":"true","reason":""}   
etcd-1               Healthy   {"health":"true","reason":""}   
[root@k8s-master01 bootstrap]# 
[root@k8s-master01 bootstrap]# 
[root@k8s-master01 bootstrap]# 
[root@k8s-master01 bootstrap]# kubectl create -f bootstrap.secret.yaml
secret/bootstrap-token-c8ad9c created
clusterrolebinding.rbac.authorization.k8s.io/kubelet-bootstrap created
clusterrolebinding.rbac.authorization.k8s.io/node-autoapprove-bootstrap created
clusterrolebinding.rbac.authorization.k8s.io/node-autoapprove-certificate-rotation created
clusterrole.rbac.authorization.k8s.io/system:kube-apiserver-to-kubelet created
clusterrolebinding.rbac.authorization.k8s.io/system:kube-apiserver created
[root@k8s-master01 bootstrap]# 

9. Node configuration

9.1. Copy the certificates from master01 to the other nodes

[root@k8s-master01 bootstrap]# cd /etc/kubernetes/
[root@k8s-master01 kubernetes]# 
[root@k8s-master01 kubernetes]# for NODE in k8s-master02 k8s-master03 k8s-node01 k8s-node02 k8s-node03; do ssh $NODE mkdir -p /etc/kubernetes/pki; for FILE in pki/ca.pem pki/ca-key.pem pki/front-proxy-ca.pem bootstrap-kubelet.kubeconfig kube-proxy.kubeconfig; do scp /etc/kubernetes/$FILE $NODE:/etc/kubernetes/${FILE}; done; done

9.2. kubelet configuration

9.2.1. Using docker as the runtime (not used in this deployment)

cat > /usr/lib/systemd/system/kubelet.service << EOF

[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes

[Service]
ExecStart=/usr/local/bin/kubelet \\
    --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig  \\
    --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\
    --config=/etc/kubernetes/kubelet-conf.yml \\
    --container-runtime-endpoint=unix:///run/cri-dockerd.sock  \\
    --node-labels=node.kubernetes.io/node=

[Install]
WantedBy=multi-user.target
EOF

9.2.2. Using containerd as the runtime (run on all nodes)

[root@k8s-master01 kubernetes]# mkdir -pv /var/lib/kubelet /var/log/kubernetes /etc/systemd/system/kubelet.service.d /etc/kubernetes/manifests/
[root@k8s-master01 kubernetes]# cat > /usr/lib/systemd/system/kubelet.service << EOF

[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=containerd.service
Requires=containerd.service

[Service]
ExecStart=/usr/local/bin/kubelet   \\
  --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig  \\
  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig  \\
  --config=/etc/kubernetes/kubelet-conf.yml   \\
  --node-labels=node.kubernetes.io/node=
    # --feature-gates=IPv6DualStack=true
    # --container-runtime=remote
    # --runtime-request-timeout=15m
    # --cgroup-driver=systemd

[Install]
WantedBy=multi-user.target
EOF
[root@k8s-master01 kubernetes]# 

9.2.3. Create the kubelet configuration file on all k8s nodes

[root@k8s-master01 kubernetes]# cat > /etc/kubernetes/kubelet-conf.yml <<EOF
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
cgroupDriver: systemd
cgroupsPerQOS: true
clusterDNS:
- 192.168.200.10
clusterDomain: cluster.local
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
contentType: application/vnd.kubernetes.protobuf
cpuCFSQuota: true
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
enableControllerAttachDetach: true
enableDebuggingHandlers: true
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 20s
hairpinMode: promiscuous-bridge
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 20s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
iptablesDropBit: 15
iptablesMasqueradeBit: 14
kubeAPIBurst: 10
kubeAPIQPS: 5
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
registryBurst: 10
registryPullQPS: 5
resolvConf: /etc/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
volumeStatsAggPeriod: 1m0s
EOF
[root@k8s-master01 kubernetes]# 

9.2.4. Start the service

[root@k8s-master01 kubernetes]# systemctl daemon-reload;systemctl restart kubelet;systemctl enable --now kubelet;systemctl status kubelet
Created symlink /etc/systemd/system/multi-user.target.wants/kubelet.service → /usr/lib/systemd/system/kubelet.service.
● kubelet.service - Kubernetes Kubelet
     Loaded: loaded (/usr/lib/systemd/system/kubelet.service; enabled; preset: disabled)
     Active: active (running) since Wed 2023-05-31 11:27:36 CST; 328ms ago
       Docs: https://github.com/kubernetes/kubernetes
   Main PID: 121503 (kubelet)
      Tasks: 7 (limit: 22988)
     Memory: 15.8M
        CPU: 324ms
     CGroup: /system.slice/kubelet.service
             └─121503 /usr/local/bin/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kubele>

May 31 11:27:36 k8s-master01 systemd[1]: Started Kubernetes Kubelet.
May 31 11:27:36 k8s-master01 kubelet[121503]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config fil>
[root@k8s-master01 kubernetes]# 
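
Because bootstrap.secret.yaml created the auto-approve bindings, each kubelet's certificate request should be approved automatically; this can be confirmed from master01:

# Each node should show a CSR in the Approved,Issued condition
kubectl get csr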

9.2.5. Check the cluster

[root@k8s-master01 kubernetes]# kubectl  get node
NAME           STATUS   ROLES    AGE   VERSION
k8s-master01   Ready    <none>   21m   v1.27.1
k8s-master02   Ready    <none>   21m   v1.27.1
k8s-master03   Ready    <none>   21m   v1.27.1
k8s-node01     Ready    <none>   21m   v1.27.1
k8s-node02     Ready    <none>   21m   v1.27.1
[root@k8s-master01 kubernetes]# 
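
The ROLES column shows <none> because a binary installation does not set role labels; they are cosmetic, but can be added if desired (a sketch, adjust the node names to your environment):

kubectl label node k8s-master01 k8s-master02 k8s-master03 node-role.kubernetes.io/control-plane=
kubectl label node k8s-node01 k8s-node02 k8s-node03 node-role.kubernetes.io/worker=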

9.2.6. Check the container runtime

[root@k8s-master01 kubernetes]# kubectl describe node | grep Runtime
  Container Runtime Version:  containerd://1.7.1
  Container Runtime Version:  containerd://1.7.1
  Container Runtime Version:  containerd://1.7.1
  Container Runtime Version:  containerd://1.7.1
  Container Runtime Version:  containerd://1.7.1
[root@k8s-master01 kubernetes]#

9.3. kube-proxy configuration

9.3.1. Distribute the kubeconfig to the other nodes

[root@k8s-master01 kubernetes]# for NODE in k8s-master02 k8s-master03; do scp /etc/kubernetes/kube-proxy.kubeconfig $NODE:/etc/kubernetes/kube-proxy.kubeconfig; done
kube-proxy.kubeconfig                                                                                              100% 6384     6.8MB/s   00:00    
kube-proxy.kubeconfig                                                                                              100% 6384     7.1MB/s   00:00    
[root@k8s-master01 kubernetes]# for NODE in k8s-node01 k8s-node02; do scp /etc/kubernetes/kube-proxy.kubeconfig $NODE:/etc/kubernetes/kube-proxy.kubeconfig;  done
kube-proxy.kubeconfig                                                                                              100% 6384     7.0MB/s   00:00    
kube-proxy.kubeconfig                                                                                              100% 6384     7.3MB/s   00:00    
[root@k8s-master01 kubernetes]# 

9.3.2. Create the kube-proxy service unit on all k8s nodes

cat >  /usr/lib/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-proxy \\
  --config=/etc/kubernetes/kube-proxy.yaml \\
  --v=2

Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target

EOF

9.3.3. Add the kube-proxy configuration on all k8s nodes

[root@k8s-master01 kubernetes]# cat > /etc/kubernetes/kube-proxy.yaml << EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
clientConnection:
  acceptContentTypes: ""
  burst: 10
  contentType: application/vnd.kubernetes.protobuf
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
  qps: 5
clusterCIDR: 192.168.204.0/22
configSyncPeriod: 15m0s
conntrack:
  max: null
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: ""
iptables:
  masqueradeAll: false
  masqueradeBit: 14
  minSyncPeriod: 0s
  syncPeriod: 30s
ipvs:
  masqueradeAll: true
  minSyncPeriod: 5s
  scheduler: "rr"
  syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
udpIdleTimeout: 250ms
EOF

9.3.4. Start kube-proxy

[root@k8s-master01 kubernetes]# systemctl daemon-reload;systemctl restart kube-proxy;systemctl enable --now kube-proxy;systemctl status kube-proxy
Created symlink /etc/systemd/system/multi-user.target.wants/kube-proxy.service → /usr/lib/systemd/system/kube-proxy.service.
● kube-proxy.service - Kubernetes Kube Proxy
     Loaded: loaded (/usr/lib/systemd/system/kube-proxy.service; enabled; preset: disabled)
     Active: active (running) since Wed 2023-05-31 12:51:13 CST; 331ms ago
       Docs: https://github.com/kubernetes/kubernetes
   Main PID: 127132 (kube-proxy)
      Tasks: 7 (limit: 22988)
     Memory: 15.6M
        CPU: 199ms
     CGroup: /system.slice/kube-proxy.service
             ├─127132 /usr/local/bin/kube-proxy --config=/etc/kubernetes/kube-proxy.yaml --v=2
             └─127181 iptables -w 5 -W 100000 -I OUTPUT -t nat -m comment --comment "kubernetes service portals" -j KUBE-SERVICES

May 31 12:51:13 k8s-master01 kube-proxy[127132]: I0531 12:51:13.766219  127132 shared_informer.go:311] Waiting for caches to sync for service config
May 31 12:51:13 k8s-master01 kube-proxy[127132]: I0531 12:51:13.766281  127132 config.go:315] "Starting node config controller"
May 31 12:51:13 k8s-master01 kube-proxy[127132]: I0531 12:51:13.766292  127132 shared_informer.go:311] Waiting for caches to sync for node config
May 31 12:51:13 k8s-master01 kube-proxy[127132]: I0531 12:51:13.774341  127132 proxier.go:926] "Not syncing ipvs rules until Services and Endpoints >
May 31 12:51:13 k8s-master01 kube-proxy[127132]: I0531 12:51:13.775063  127132 proxier.go:926] "Not syncing ipvs rules until Services and Endpoints >
May 31 12:51:13 k8s-master01 kube-proxy[127132]: I0531 12:51:13.866243  127132 shared_informer.go:318] Caches are synced for endpoint slice config
May 31 12:51:13 k8s-master01 kube-proxy[127132]: I0531 12:51:13.866321  127132 proxier.go:926] "Not syncing ipvs rules until Services and Endpoints >
May 31 12:51:13 k8s-master01 kube-proxy[127132]: I0531 12:51:13.866354  127132 proxier.go:926] "Not syncing ipvs rules until Services and Endpoints >
May 31 12:51:13 k8s-master01 kube-proxy[127132]: I0531 12:51:13.867084  127132 shared_informer.go:318] Caches are synced for node config
May 31 12:51:13 k8s-master01 kube-proxy[127132]: I0531 12:51:13.867189  127132 shared_informer.go:318] Caches are synced for service config
[root@k8s-master01 kubernetes]# 
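
Since mode is set to ipvs, the rules can be inspected with ipvsadm (installed earlier); at this point at least the kubernetes Service VIP from the 192.168.200.0/22 range should appear as a virtual server forwarding to the three apiservers on 6443:

ipvsadm -Ln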

10. Install the network plugin

Take a snapshot before this step so you can roll back if anything goes wrong.
libseccomp must be version 2.4 or later.

# https://github.com/opencontainers/runc/releases
# Upgrade runc (all k8s nodes)
[root@k8s-master01 cby]# install -m 755 /root/runc.amd64 /usr/local/sbin/runc
[root@k8s-master01 cby]# cp -p /usr/local/sbin/runc  /usr/local/bin/runc
[root@k8s-master01 cby]# cp -p /usr/local/sbin/runc  /usr/bin/runc
[root@k8s-master01 cby]# whereis runc 
runc: /usr/bin/runc /usr/local/bin/runc /usr/local/sbin/runc
[root@k8s-master01 cby]#

# Install a libseccomp package newer than 2.4
yum -y install http://rpmfind.net/linux/centos/8-stream/BaseOS/x86_64/os/Packages/libseccomp-2.5.1-1.el8.x86_64.rpm


# Check the installed version
[root@k8s-master-1 ~]# rpm -qa | grep libseccomp
libseccomp-2.5.1-1.el8.x86_64

10.1. Install calico

wget https://raw.githubusercontent.com/projectcalico/calico/master/manifests/calico-typha.yaml

cp calico-typha.yaml calico.yaml

vim calico.yaml
# In the calico-config ConfigMap section
    "ipam": {
        "type": "calico-ipam",
    },
    - name: IP
      value: "autodetect"

    - name: CALICO_IPV4POOL_CIDR
      value: "192.168.204.0/22"   # must match the pod CIDR (--cluster-cidr)
      
# Change the image registry
# Use docker search to find mirrors of the images involved.

[root@k8s-master01 kubernetes]# sed -i "s#docker.io/calico/#calico/#g" calico.yaml 
[root@k8s-master01 kubernetes]# grep image calico.yaml
          image: calico/cni:master
          imagePullPolicy: IfNotPresent
          image: calico/cni:master
          imagePullPolicy: IfNotPresent
          image: calico/node:master
          imagePullPolicy: IfNotPresent
          image: calico/node:master
          imagePullPolicy: IfNotPresent
          image: calico/kube-controllers:master
          imagePullPolicy: IfNotPresent
      - image: calico/typha:master
        imagePullPolicy: IfNotPresent
[root@k8s-master01 kubernetes]#

# Create the resources
[root@k8s-master01 kubernetes]# kubectl apply -f calico.yaml
poddisruptionbudget.policy/calico-kube-controllers created
poddisruptionbudget.policy/calico-typha created
serviceaccount/calico-kube-controllers created
serviceaccount/calico-node created
serviceaccount/calico-cni-plugin created
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgpfilters.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/caliconodestatuses.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipreservations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrole.rbac.authorization.k8s.io/calico-cni-plugin created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-cni-plugin created
service/calico-typha created
daemonset.apps/calico-node created
deployment.apps/calico-kube-controllers created
deployment.apps/calico-typha created
[root@k8s-master01 kubernetes]# 

10.2. Check pod status

  • calico initialization is slow; be patient, it can take around ten minutes
  • This step stalled for a long time in my case simply because the images could not be pulled from the registry.
  • If it does not succeed, review the configuration as described under "Checking POD status" in the FAQ, then delete the pods so they are recreated
[root@k8s-master01 ~]# kubectl  get pod -A
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-6747f75cdc-fbvvc   1/1     Running   0          61s
kube-system   calico-node-fs7hl                          1/1     Running   0          61s
kube-system   calico-node-jqz58                          1/1     Running   0          61s
kube-system   calico-node-khjlg                          1/1     Running   0          61s
kube-system   calico-node-wmf8q                          1/1     Running   0          61s
kube-system   calico-node-xc6gn                          1/1     Running   0          61s
kube-system   calico-typha-6cdc4b4fbc-57snb              1/1     Running   0          61s

11. Install CoreDNS

Perform on master01.

11.1. Install helm

Because of corporate network restrictions, helm is installed from the binary release.

# wget https://get.helm.sh/helm-v3.12.0-linux-amd64.tar.gz
[root@k8s-master01 kubernetes]# tar -zxvf helm-v3.12.0-linux-amd64.tar.gz 
linux-amd64/
linux-amd64/helm
linux-amd64/README.md
linux-amd64/LICENSE
[root@k8s-master01 kubernetes]# cd linux-amd64/
[root@k8s-master01 linux-amd64]# ls
helm  LICENSE  README.md
[root@k8s-master01 linux-amd64]# mv helm  /usr/local/bin/helm
[root@k8s-master01 linux-amd64]#

11.2. Download the CoreDNS chart

[root@k8s-master01 cilium]# helm repo add coredns https://coredns.github.io/helm
"coredns" has been added to your repositories
[root@k8s-master01 cilium]#
[root@k8s-master01 coredns]# helm pull coredns/coredns
[root@k8s-master01 coredns]# tar -zxvf coredns-1.24.0.tgz 
coredns/Chart.yaml
coredns/values.yaml
coredns/templates/NOTES.txt
coredns/templates/_helpers.tpl
coredns/templates/clusterrole-autoscaler.yaml
coredns/templates/clusterrole.yaml
coredns/templates/clusterrolebinding-autoscaler.yaml
coredns/templates/clusterrolebinding.yaml
coredns/templates/configmap-autoscaler.yaml
coredns/templates/configmap.yaml
coredns/templates/deployment-autoscaler.yaml
coredns/templates/deployment.yaml
coredns/templates/hpa.yaml
coredns/templates/poddisruptionbudget.yaml
coredns/templates/podsecuritypolicy.yaml
coredns/templates/service-metrics.yaml
coredns/templates/service.yaml
coredns/templates/serviceaccount-autoscaler.yaml
coredns/templates/serviceaccount.yaml
coredns/templates/servicemonitor.yaml
coredns/.helmignore
coredns/README.md
[root@k8s-master01 coredns]# cd coredns/

11.3. Edit the chart values

# Set the cluster DNS IP
vim values.yaml
cat values.yaml | grep clusterIP:
clusterIP: "192.168.200.10"

[root@k8s-master01 coredns]# cat values.yaml | grep repository
  repository: coredns/coredns
    repository: coredns/cluster-proportional-autoscaler
[root@k8s-master01 coredns]#
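
The install step itself is not shown above; a minimal invocation from inside the unpacked chart directory (using the values.yaml edited here) might look like this, followed by a check that the pod comes up:

helm install coredns . --namespace kube-system
kubectl get pod -n kube-system | grep coredns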

12. Install Metrics Server

Perform only on master01.

12.1. Install metrics-server

Recent Kubernetes versions collect resource metrics through metrics-server, which reports memory, disk, CPU, and network usage for nodes and Pods.

# Single-instance version
wget https://mirrors.chenby.cn/https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
# High-availability version
wget https://mirrors.chenby.cn/https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/high-availability.yaml


# Edit the manifests
vim components.yaml
vim high-availability.yaml

---
# 1
defaultArgs:
  - --cert-dir=/tmp
  - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
  - --kubelet-use-node-status-port
  - --metric-resolution=15s
  - --kubelet-insecure-tls
  - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem
  - --requestheader-username-headers=X-Remote-User
  - --requestheader-group-headers=X-Remote-Group
  - --requestheader-extra-headers-prefix=X-Remote-Extra-

# 2
        volumeMounts:
        - mountPath: /tmp
          name: tmp-dir
        - name: ca-ssl
          mountPath: /etc/kubernetes/pki

# 3
      volumes:
      - emptyDir: {}
        name: tmp-dir
      - name: ca-ssl
        hostPath:
          path: /etc/kubernetes/pki
---


[root@k8s-master01 kubernetes]# cat high-availability.yaml | grep image
        image: dyrnq/metrics-server:v0.6.3
        imagePullPolicy: IfNotPresent
[root@k8s-master01 kubernetes]#


# Choose one of the two
kubectl apply -f high-availability.yaml
# kubectl apply -f components.yaml

12.2. Wait a moment, then check the status

[root@k8s-master01 kubernetes]# kubectl top  node 
NAME           CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%   
k8s-master01   265m         6%     2657Mi          75%       
k8s-master02   203m         5%     2401Mi          67%       
k8s-master03   185m         4%     2422Mi          68%       
k8s-node01     114m         2%     1747Mi          49%       
k8s-node02     165m         4%     1830Mi          51%       
k8s-node03     147m         3%     1667Mi          47%       
[root@k8s-master01 kubernetes]# 

13. Cluster validation

13.1. Deploy a test pod

cat<<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - name: busybox
    image: docker.io/library/busybox:1.28
    command:
      - sleep
      - "3600"
    imagePullPolicy: IfNotPresent
  restartPolicy: Always
EOF

# Check
kubectl  get pod
NAME      READY   STATUS    RESTARTS   AGE
busybox   1/1     Running   0          17s

13.2. Resolve the kubernetes service in the default namespace from the pod

kubectl get svc
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   17h

kubectl exec  busybox -n default -- nslookup kubernetes
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

Name:      kubernetes
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local

13.3. Test cross-namespace resolution

kubectl exec  busybox -n default -- nslookup kube-dns.kube-system
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

Name:      kube-dns.kube-system
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

13.4. Every node must be able to reach the kubernetes service on 443 and the kube-dns service on 53

telnet 10.96.0.1 443
Trying 10.96.0.1...
Connected to 10.96.0.1.
Escape character is '^]'.

 telnet 10.96.0.10 53
Trying 10.96.0.10...
Connected to 10.96.0.10.
Escape character is '^]'.

curl 10.96.0.10:53
curl: (52) Empty reply from server

13.5. Pods must be able to reach each other

kubectl get po -owide
NAME      READY   STATUS    RESTARTS   AGE   IP              NODE         NOMINATED NODE   READINESS GATES
busybox   1/1     Running   0          17m   172.27.14.193   k8s-node02   <none>           <none>

 kubectl get po -n kube-system -owide
NAME                                       READY   STATUS    RESTARTS      AGE   IP               NODE           NOMINATED NODE   READINESS GATES
calico-kube-controllers-5dffd5886b-4blh6   1/1     Running   0             77m   172.25.244.193   k8s-master01   <none>           <none>
calico-node-fvbdq                          1/1     Running   1 (75m ago)   77m   192.168.0.31     k8s-master01   <none>           <none>
calico-node-g8nqd                          1/1     Running   0             77m   192.168.0.34     k8s-node01     <none>           <none>
calico-node-mdps8                          1/1     Running   0             77m   192.168.0.35     k8s-node02     <none>           <none>
calico-node-nf4nt                          1/1     Running   0             77m   192.168.0.33     k8s-master03   <none>           <none>
calico-node-sq2ml                          1/1     Running   0             77m   192.168.0.32     k8s-master02   <none>           <none>
calico-typha-8445487f56-mg6p8              1/1     Running   0             77m   192.168.0.35     k8s-node02     <none>           <none>
calico-typha-8445487f56-pxbpj              1/1     Running   0             77m   192.168.0.31     k8s-master01   <none>           <none>
calico-typha-8445487f56-tnssl              1/1     Running   0             77m   192.168.0.34     k8s-node01     <none>           <none>
coredns-5db5696c7-67h79                    1/1     Running   0             63m   172.25.92.65     k8s-master02   <none>           <none>
metrics-server-6bf7dcd649-5fhrw            1/1     Running   0             61m   172.18.195.1     k8s-master03   <none>           <none>

# Enter busybox and ping a pod on another node

kubectl exec -ti busybox -- sh
/ # ping 192.168.0.34
PING 192.168.0.34 (192.168.0.34): 56 data bytes
64 bytes from 192.168.0.34: seq=0 ttl=63 time=0.358 ms
64 bytes from 192.168.0.34: seq=1 ttl=63 time=0.668 ms
64 bytes from 192.168.0.34: seq=2 ttl=63 time=0.637 ms
64 bytes from 192.168.0.34: seq=3 ttl=63 time=0.624 ms
64 bytes from 192.168.0.34: seq=4 ttl=63 time=0.907 ms

# Successful replies show the pod can communicate across namespaces and across hosts

13.6. Create three replicas and confirm they land on different nodes (delete them when done)

cat > deployments.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80

EOF

kubectl  apply -f deployments.yaml 
deployment.apps/nginx-deployment created

kubectl  get pod 
NAME                               READY   STATUS    RESTARTS   AGE
busybox                            1/1     Running   0          6m25s
nginx-deployment-9456bbbf9-4bmvk   1/1     Running   0          8s
nginx-deployment-9456bbbf9-9rcdk   1/1     Running   0          8s
nginx-deployment-9456bbbf9-dqv8s   1/1     Running   0          8s

# Delete the nginx deployment

[root@k8s-master01 ~]# kubectl delete -f deployments.yaml 

14. Install the dashboard

[root@k8s-master01 kubernetes]# helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/
"kubernetes-dashboard" has been added to your repositories
[root@k8s-master01 kubernetes]# 
[root@k8s-master01 kubernetes]# 
[root@k8s-master01 kubernetes]# 
[root@k8s-master01 kubernetes]# 
[root@k8s-master01 kubernetes]# 
[root@k8s-master01 kubernetes]# helm install kubernetes-dashboard kubernetes-dashboard/kubernetes-dashboard --namespace kube-system
NAME: kubernetes-dashboard
LAST DEPLOYED: Thu Jun  1 15:39:17 2023
NAMESPACE: kube-system
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
*********************************************************************************
*** PLEASE BE PATIENT: kubernetes-dashboard may take a few minutes to install ***
*********************************************************************************

Get the Kubernetes Dashboard URL by running:
  export POD_NAME=$(kubectl get pods -n kube-system -l "app.kubernetes.io/name=kubernetes-dashboard,app.kubernetes.io/instance=kubernetes-dashboard" -o jsonpath="{.items[0].metadata.name}")
  echo https://127.0.0.1:8443/
  kubectl -n kube-system port-forward $POD_NAME 8443:8443
[root@k8s-master01 kubernetes]# 

14.1. Change the dashboard Service to NodePort (skip if it already is)

kubectl edit svc kubernetes-dashboard -n kube-system

  type: NodePort

14.2. Check the port number

[root@k8s-master01 kubernetes]# kubectl get svc kubernetes-dashboard -n kube-system
NAME                   TYPE       CLUSTER-IP        EXTERNAL-IP   PORT(S)         AGE
kubernetes-dashboard   NodePort   192.168.203.103   <none>        443:31716/TCP   2m4s

14.3. Create a token

cat > dashboard-user.yaml << EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system
EOF

kubectl  apply -f dashboard-user.yaml

# Create the token
kubectl -n kube-system create token admin-user

14.4. Log in to the dashboard

https://192.168.1.11:31716

15. Install ingress

15.1. Deploy

wget https://mirrors.chenby.cn/https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/cloud/deploy.yaml


vim deploy.yaml 

cat deploy.yaml | grep image:
        image: registry.cn-hangzhou.aliyuncs.com/chenby/controller:v1.7.0 
        image: registry.cn-hangzhou.aliyuncs.com/chenby/kube-webhook-certgen:v1.3.0 
        image: registry.cn-hangzhou.aliyuncs.com/chenby/kube-webhook-certgen:v1.3.0 


cat > backend.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: default-http-backend
  labels:
    app.kubernetes.io/name: default-http-backend
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: default-http-backend
  template:
    metadata:
      labels:
        app.kubernetes.io/name: default-http-backend
    spec:
      terminationGracePeriodSeconds: 60
      containers:
      - name: default-http-backend
        image: registry.cn-hangzhou.aliyuncs.com/chenby/defaultbackend-amd64:1.5 
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 30
          timeoutSeconds: 5
        ports:
        - containerPort: 8080
        resources:
          limits:
            cpu: 10m
            memory: 20Mi
          requests:
            cpu: 10m
            memory: 20Mi
---
apiVersion: v1
kind: Service
metadata:
  name: default-http-backend
  namespace: kube-system
  labels:
    app.kubernetes.io/name: default-http-backend
spec:
  ports:
  - port: 80
    targetPort: 8080
  selector:
    app.kubernetes.io/name: default-http-backend
EOF

kubectl  apply -f deploy.yaml 
kubectl  apply -f backend.yaml 


cat > ingress-demo-app.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hello-server
spec:
  replicas: 2
  selector:
    matchLabels:
      app: hello-server
  template:
    metadata:
      labels:
        app: hello-server
    spec:
      containers:
      - name: hello-server
        image: registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/hello-server
        ports:
        - containerPort: 9000
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx-demo
  name: nginx-demo
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx-demo
  template:
    metadata:
      labels:
        app: nginx-demo
    spec:
      containers:
      - image: nginx
        name: nginx
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: nginx-demo
  name: nginx-demo
spec:
  selector:
    app: nginx-demo
  ports:
  - port: 8000
    protocol: TCP
    targetPort: 80
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: hello-server
  name: hello-server
spec:
  selector:
    app: hello-server
  ports:
  - port: 8000
    protocol: TCP
    targetPort: 9000
---
apiVersion: networking.k8s.io/v1
kind: Ingress  
metadata:
  name: ingress-host-bar
spec:
  ingressClassName: nginx
  rules:
  - host: "hello.chenby.cn"
    http:
      paths:
      - pathType: Prefix
        path: "/"
        backend:
          service:
            name: hello-server
            port:
              number: 8000
  - host: "demo.chenby.cn"
    http:
      paths:
      - pathType: Prefix
        path: "/nginx"  
        backend:
          service:
            name: nginx-demo
            port:
              number: 8000
EOF

# Run this only after the ingress-nginx resources above have finished creating:
kubectl  apply -f ingress-demo-app.yaml 

kubectl  get ingress
NAME               CLASS   HOSTS                            ADDRESS     PORTS   AGE
ingress-host-bar   nginx   hello.chenby.cn,demo.chenby.cn   192.168.0.32   80      7s
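
Both rules can be exercised without configuring DNS by passing the Host header explicitly; a sketch, assuming the ADDRESS shown above is reachable on port 80 from where curl runs:

curl -H "Host: hello.chenby.cn" http://192.168.0.32/
curl -H "Host: demo.chenby.cn"  http://192.168.0.32/nginx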

15.2. Check the ingress ports

# Change the service type to NodePort
kubectl edit svc -n ingress-nginx   ingress-nginx-controller

type: NodePort

[root@hello ~/yaml]# kubectl  get svc -A | grep ingress
[root@hello ~/yaml]#
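
If you prefer a non-interactive change over kubectl edit, a patch sketch for the same service achieves the same result:

kubectl -n ingress-nginx patch svc ingress-nginx-controller -p '{"spec":{"type":"NodePort"}}'
kubectl -n ingress-nginx get svc ingress-nginx-controller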

16. FAQ

16.1. Check pod status

kubectl describe pod -n NAMESPACE calico-kube-controllers-POD_NAME
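
Besides describe, the following are often useful when a pod is stuck; the pod name and namespace are placeholders:

# Recent events in the namespace, oldest first
kubectl get events -n <namespace> --sort-by=.metadata.creationTimestamp
# Container logs, including the previous instance if the pod is crash-looping
kubectl logs <podname> -n <namespace> --previous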

16.2. Delete a pod

16.2.1. List matching pods with kubectl (including each pod's namespace)

kubectl get pod -A | grep <podname>

[root@tcs ~]# kubectl get pod -A |grep yhl
ingress-nginx-yhl-large   yhl-large-ingress-nginx-controller-dc666995f-djk8k      1/1     Running             0          46d
ingress-nginx-yhl-large   yhl-large-ingress-nginx-controller-dc666995f-fncrq      1/1     Running             0          2m46s
ingress-nginx-yhl-large   yhl-large-ingress-nginx-controller-dc666995f-wxz2f      1/1     Running             0          4m30s
ingress-nginx-yhl         yhl-ingress-nginx-controller-fd565dddc-44z2d            1/1     Running             0          45d
ingress-nginx-yhl         yhl-ingress-nginx-controller-fd565dddc-nlst8            1/1     Running             0          45d
ingress-nginx-yhl         yhl-ingress-nginx-controller-fd565dddc-sjqx7            1/1     Running             0          45d

16.2.2. Delete a pod with kubectl

kubectl delete pod <podname> -n <namespace>

When you run the delete command, you will find that the pod is not actually gone: Kubernetes treats the deleted pod as an unexpected failure, and the controller managing it (self-healing) immediately creates a replacement.
Therefore, to delete a pod completely you must first remove the mechanism that recreates it, i.e. delete the corresponding Deployment.
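
To confirm which controller keeps recreating a pod, you can inspect its ownerReferences; a sketch with placeholder names:

# Typically prints ReplicaSet/<name>; the ReplicaSet in turn belongs to a Deployment
kubectl get pod <podname> -n <namespace> -o jsonpath='{.metadata.ownerReferences[*].kind}/{.metadata.ownerReferences[*].name}{"\n"}'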

16.2.3. Check the deployment

kubectl get deployment -n <namespace>

[root@tcs- ~]# kubectl get deployment -n ingress-nginx-yhl
NAME                                 READY   UP-TO-DATE   AVAILABLE   AGE
yhl-ingress-nginx-controller   3/3     3            3           45d

16.2.4. Delete the deployment

kubectl delete deployment <deployment-name> -n <namespace>

[root@tcs- ~]# kubectl delete deployment yhl-ingress-nginx-controller -n ingress-nginx-yhl
deployment.apps "yhl-ingress-nginx-controller" deleted

Then run the pod delete command. In my case, after deleting the deployment and querying again, the pods above had already started terminating on their own, so this step can be applied as needed.

kubectl delete pod <podname> -n <namespace>

# An example from my environment: after the deployment was deleted, the pods terminated on their own
[root@tcs- ~]# kubectl get pod -A|grep yhl
ingress-nginx-yhl   yhl-ingress-nginx-controller-fd565dddc-44z2d            1/1     Terminating         0          45d
ingress-nginx-yhl   yhl-ingress-nginx-controller-fd565dddc-nlst8            1/1     Terminating         0          45d
ingress-nginx-yhl   yhl-ingress-nginx-controller-fd565dddc-sjqx7            1/1     Terminating         0          45d
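
If you only want the pods stopped temporarily rather than removed for good, scaling the Deployment to zero replicas is a less destructive alternative to deleting it; a sketch with placeholder names:

# Stop all pods managed by the deployment without deleting its definition
kubectl scale deployment <deployment-name> --replicas=0 -n <namespace>
# Bring the workload back later
kubectl scale deployment <deployment-name> --replicas=3 -n <namespace>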

16.3. Command auto-completion

yum install bash-completion -y
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc

16.4. Uninstall a helm release

1. Symptom
Installing a service with helm install fails; after fixing the chart and reinstalling, helm reports "error: cannot re-use a name that is still in use", and helm uninstall reports "Error: uninstall: Release not loaded".

2. Cause
When helm install fails, the release is left in a failed or pending state. helm -n <namespace> ls does not show it, so the conflicting name cannot be found, and the release can be neither reinstalled nor uninstalled.

3. Solution (see the sketch below)
a. Run helm -n <namespace> ls -a to list all releases, including the failed one.
b. Run helm -n <namespace> delete <packagename> to remove the failed install.
c. Run helm install again to reinstall the service.
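
A minimal sketch of those steps; the release name "demo", the namespace "default", and the chart path "./chart" are placeholders:

helm -n default ls -a                  # failed or pending releases only show up with -a
helm -n default delete demo            # "helm delete" is an alias of "helm uninstall" in Helm 3
helm -n default install demo ./chart   # reinstall from the corrected chart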
