Environment Preparation

Host Planning

Three masters, three workers
hostname               ip                Memory            CPU
Master-50        192.168.91.50             8G                4C
Master-51        192.168.91.51             4G                4C
Master-52        192.168.91.52             4G                4C
Node-53          192.168.91.53             4G                4C
Node-54          192.168.91.54             4G                4C
Node-55          192.168.91.55             4G                4C
vip              192.168.91.199

Initialization

#Edit the hosts file
#Run on all hosts
cat > /etc/hosts << 'EOF'
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
199.232.68.133 raw.githubusercontent.com
199.232.68.133 user-images.githubusercontent.com
199.232.68.133 avatars2.githubusercontent.com
199.232.68.133 avatars1.githubusercontent.com
192.168.91.10   master-10
192.168.91.11   node-11
192.168.91.12   node-12
192.168.91.34   nfs-34
192.168.91.13   node-13
192.168.91.14   node-14
192.168.91.50   master-50
192.168.91.51   master-51
192.168.91.52   master-52
192.168.91.53   node-53
192.168.91.54   node-54
192.168.91.55   node-55
EOF

#Generate a key pair and distribute it (run on master-50)
[root@master-50 ~]# ssh-keygen -t rsa     #press Enter through every prompt
ssh-copy-id -i .ssh/id_rsa.pub master-51
ssh-copy-id -i .ssh/id_rsa.pub master-52
ssh-copy-id -i .ssh/id_rsa.pub node-53
ssh-copy-id -i .ssh/id_rsa.pub node-54
ssh-copy-id -i .ssh/id_rsa.pub node-55
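
The same distribution can also be written as a loop; a minimal sketch using the host names from the plan above:

for host in master-51 master-52 node-53 node-54 node-55; do
  ssh-copy-id -i ~/.ssh/id_rsa.pub $host
done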

#Disable the firewall (all hosts)
systemctl stop firewalld ; systemctl disable firewalld

#Disable SELinux
setenforce 0
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config

#Disable swap
swapoff -a
sed -i '/swap/d' /etc/fstab
sed -i '/UUID/d' /etc/fstab   #only needed on cloned VMs
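
To confirm swap is really off, both of the following checks should come back zero/empty:

free -m | grep -i swap    #the Swap line should show 0 total
swapon -s                 #should print nothing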

#Kernel parameters: load the br_netfilter module
modprobe br_netfilter
#Verify the module is loaded:
lsmod |grep br_netfilter
#Set the kernel parameters
cat > /etc/sysctl.d/k8s.conf << 'EOF'
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
#Apply the new kernel parameters
sysctl -p /etc/sysctl.d/k8s.conf


#Why enable net.ipv4.ip_forward = 1?
If initializing Kubernetes fails with an error about it, ip_forward is not enabled and must be turned on.
net.ipv4.ip_forward controls packet forwarding:
For security, Linux disables packet forwarding by default. Forwarding means that when a host has more
than one NIC and one of them receives a packet destined for another address, the kernel hands the packet
to another NIC, which sends it on according to the routing table. This is normally a router's job.
To give Linux this routing capability, set the kernel parameter net.ipv4.ip_forward: a value of 0
disables IP forwarding, and 1 means IP forwarding is enabled.
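
After sysctl -p, the settings can be confirmed directly:

sysctl net.ipv4.ip_forward                          #expect: net.ipv4.ip_forward = 1
cat /proc/sys/net/bridge/bridge-nf-call-iptables    #expect: 1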

#Configure the Aliyun yum repos
yum install openssh-clients lrzsz -y
cat > /etc/yum.repos.d/CentOS-Base.repo << 'EOF'
# CentOS-Base.repo
#
# The mirror system uses the connecting IP address of the client and the
# update status of each mirror to pick mirrors that are updated to and
# geographically close to the client.  You should use this for CentOS updates
# unless you are manually picking other mirrors.
#
# If the mirrorlist= does not work for you, as a fall back you can try the 
# remarked out baseurl= line instead.
#
#
 
[base]
name=CentOS-$releasever - Base - mirrors.aliyun.com
failovermethod=priority
baseurl=http://mirrors.aliyun.com/centos/$releasever/os/$basearch/
        http://mirrors.aliyuncs.com/centos/$releasever/os/$basearch/
        http://mirrors.cloud.aliyuncs.com/centos/$releasever/os/$basearch/
gpgcheck=1
gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7
 
#released updates 
[updates]
name=CentOS-$releasever - Updates - mirrors.aliyun.com
failovermethod=priority
baseurl=http://mirrors.aliyun.com/centos/$releasever/updates/$basearch/
        http://mirrors.aliyuncs.com/centos/$releasever/updates/$basearch/
        http://mirrors.cloud.aliyuncs.com/centos/$releasever/updates/$basearch/
gpgcheck=1
gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7
 
#additional packages that may be useful
[extras]
name=CentOS-$releasever - Extras - mirrors.aliyun.com
failovermethod=priority
baseurl=http://mirrors.aliyun.com/centos/$releasever/extras/$basearch/
        http://mirrors.aliyuncs.com/centos/$releasever/extras/$basearch/
        http://mirrors.cloud.aliyuncs.com/centos/$releasever/extras/$basearch/
gpgcheck=1
gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7
 
#additional packages that extend functionality of existing packages
[centosplus]
name=CentOS-$releasever - Plus - mirrors.aliyun.com
failovermethod=priority
baseurl=http://mirrors.aliyun.com/centos/$releasever/centosplus/$basearch/
        http://mirrors.aliyuncs.com/centos/$releasever/centosplus/$basearch/
        http://mirrors.cloud.aliyuncs.com/centos/$releasever/centosplus/$basearch/
gpgcheck=1
enabled=0
gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7
 
#contrib - packages by Centos Users
[contrib]
name=CentOS-$releasever - Contrib - mirrors.aliyun.com
failovermethod=priority
baseurl=http://mirrors.aliyun.com/centos/$releasever/contrib/$basearch/
        http://mirrors.aliyuncs.com/centos/$releasever/contrib/$basearch/
        http://mirrors.cloud.aliyuncs.com/centos/$releasever/contrib/$basearch/
gpgcheck=1
enabled=0
gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7
EOF

#Install ntpdate and sync against a public time source
yum install ntpdate -y
ntpdate cn.pool.ntp.org
#Turn the time sync into an hourly cron job
crontab -e
0 */1 * * * /usr/sbin/ntpdate cn.pool.ntp.org
#Restart the crond service
service crond restart

#Install iptables-services, then stop and disable iptables
yum install iptables-services -y
service iptables stop && systemctl disable iptables
#Flush the firewall rules
iptables -F

#Enable IPVS
cat > /etc/sysconfig/modules/ipvs.modules << 'EOF'
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in ${ipvs_modules}; do
  /sbin/modinfo -F filename ${kernel_module} > /dev/null 2>&1
  if [ $? -eq 0 ]; then
    /sbin/modprobe ${kernel_module}
  fi
done
EOF
chmod +x /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs

#Install the base packages
yum install -y yum-utils device-mapper-persistent-data lvm2 wget net-tools nfs-utils lrzsz gcc gcc-c++ make cmake libxml2-devel openssl-devel curl curl-devel unzip sudo ntp libaio-devel wget vim ncurses-devel autoconf automake zlib-devel python-devel epel-release openssh-server socat ipvsadm conntrack ntpdate telnet rsync

#Install docker
cd /etc/yum.repos.d/
cat > docker-ce.repo << 'EOF'
[docker-ce-stable]
name=Docker CE Stable - $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/$basearch/stable
enabled=1
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-stable-debuginfo]
name=Docker CE Stable - Debuginfo $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/debug-$basearch/stable
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-stable-source]
name=Docker CE Stable - Sources
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/source/stable
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-edge]
name=Docker CE Edge - $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/$basearch/edge
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-edge-debuginfo]
name=Docker CE Edge - Debuginfo $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/debug-$basearch/edge
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-edge-source]
name=Docker CE Edge - Sources
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/source/edge
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-test]
name=Docker CE Test - $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/$basearch/test
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-test-debuginfo]
name=Docker CE Test - Debuginfo $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/debug-$basearch/test
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-test-source]
name=Docker CE Test - Sources
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/source/test
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-nightly]
name=Docker CE Nightly - $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/$basearch/nightly
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-nightly-debuginfo]
name=Docker CE Nightly - Debuginfo $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/debug-$basearch/nightly
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-nightly-source]
name=Docker CE Nightly - Sources
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/source/nightly
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
EOF

yum makecache fast
yum install docker-ce docker-ce-cli containerd.io -y
systemctl start docker && systemctl enable docker.service && systemctl status docker
#Configure registry mirrors
tee /etc/docker/daemon.json << 'EOF'
{
  "registry-mirrors": ["https://rsbud4vc.mirror.aliyuncs.com","https://registry.docker-cn.com","https://docker.mirrors.ustc.edu.cn","https://dockerhub.azk8s.cn","http://hub-mirror.c.163.com","http://qtid6917.mirror.aliyuncs.com","https://rncxm540.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
#Switch docker to the systemd cgroup driver; the kubelet checks that the two drivers match
systemctl daemon-reload
systemctl restart docker
systemctl status docker
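
A quick check that docker really switched to the systemd cgroup driver (it must match the kubelet's cgroupDriver configured later):

docker info 2>/dev/null | grep -i 'cgroup driver'   #expect: Cgroup Driver: systemd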

Building the etcd Cluster

#All master nodes
Create the etcd working directories
mkdir -p /etc/etcd
mkdir -p /etc/etcd/ssl

#master-50
Install the cfssl certificate-signing toolkit
mkdir /data/work -p
cd /data/work/
#Upload cfssl-certinfo_linux-amd64, cfssljson_linux-amd64 and cfssl_linux-amd64 to /data/work/
[root@master-50 work]# ls
cfssl-certinfo_linux-amd64  cfssljson_linux-amd64  cfssl_linux-amd64
chmod +x *
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo

Configure the CA certificate

CA certificate signing request file
[root@master-50 work]# cat > ca-csr.json << 'EOF'
{
  "CN": "kubernetes",
  "key": {
      "algo": "rsa",
      "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Hubei",
      "L": "Wuhan",
      "O": "k8s",
      "OU": "system"
    }
  ],
  "ca": {
          "expiry": "87600h"
  } 
}
EOF

CA signing config file
[root@master-50 work]# cat > ca-config.json << 'EOF'
{
  "signing": {
      "default": {
          "expiry": "87600h"
      },
    "profiles": {
        "kubernetes": {
            "usages": [
                "signing",
                "key encipherment",
                "server auth",
                "client auth"
            ],
            "expiry": "87600h"
          }
      }
    } 
}
EOF

Generate the CA certificate

[root@master-50 work]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca
[root@master-50 work]# ls
ca-config.json  ca.csr  ca-csr.json  ca-key.pem  ca.pem
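
An optional sanity check on the new CA, using standard inspection commands:

openssl x509 -in ca.pem -noout -subject -dates
#or, with the cfssl toolchain:
cfssl-certinfo -cert ca.pem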

Generate the etcd certificate

#Create the etcd CSR; change the IPs in hosts to your own etcd node IPs (every IP the etcd cert must cover)
[root@master-50 work]# cat > etcd-csr.json << 'EOF'
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.91.50",
    "192.168.91.51",
    "192.168.91.52",
    "192.168.91.199"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [{
    "C": "CN",
    "ST": "Hubei",
    "L": "Wuhan",
    "O": "k8s",
    "OU": "system"
  }]
}
EOF
#The IPs in the hosts field above are the internal IPs of all etcd nodes; reserve a few extra for future scaling.
[root@master-50 work]#  cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
2022/03/14 22:18:52 [INFO] generate received request
2022/03/14 22:18:52 [INFO] received CSR
2022/03/14 22:18:52 [INFO] generating key: rsa-2048
2022/03/14 22:18:53 [INFO] encoded CSR
2022/03/14 22:18:53 [INFO] signed certificate with serial number 531834867915325525144545785031530382928527606868
2022/03/14 22:18:53 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@master-50 work]# ls etcd*.pem
etcd-key.pem  etcd.pem

Deploy the etcd cluster

#Upload etcd-v3.4.13-linux-amd64.tar.gz to /data/work
tar -xf etcd-v3.4.13-linux-amd64.tar.gz
cp -p etcd-v3.4.13-linux-amd64/etcd* /usr/local/bin/
scp -r etcd-v3.4.13-linux-amd64/etcd* master-51:/usr/local/bin/
scp -r etcd-v3.4.13-linux-amd64/etcd* master-52:/usr/local/bin/

#Create the config file
[root@master-50 work]# cat > etcd.conf << 'EOF'
#[Member]
ETCD_NAME="etcd1"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.91.50:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.91.50:2379,http://127.0.0.1:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.91.50:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.91.50:2379"
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.91.50:2380,etcd2=https://192.168.91.51:2380,etcd3=https://192.168.91.52:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

#Notes:
ETCD_NAME: node name, unique within the cluster
ETCD_DATA_DIR: data directory
ETCD_LISTEN_PEER_URLS: peer listen address
ETCD_LISTEN_CLIENT_URLS: client listen address
ETCD_INITIAL_ADVERTISE_PEER_URLS: advertised peer address
ETCD_ADVERTISE_CLIENT_URLS: advertised client address
ETCD_INITIAL_CLUSTER: cluster node addresses
ETCD_INITIAL_CLUSTER_TOKEN: cluster token
ETCD_INITIAL_CLUSTER_STATE: join state; new means a new cluster, existing means joining an existing one
#Create the systemd unit file
[root@master-50 work]# cat > etcd.service << 'EOF'
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=-/etc/etcd/etcd.conf
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/local/bin/etcd \
 --cert-file=/etc/etcd/ssl/etcd.pem \
 --key-file=/etc/etcd/ssl/etcd-key.pem \
 --trusted-ca-file=/etc/etcd/ssl/ca.pem \
 --peer-cert-file=/etc/etcd/ssl/etcd.pem \
 --peer-key-file=/etc/etcd/ssl/etcd-key.pem \
 --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \
 --peer-client-cert-auth \
 --client-cert-auth
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF

cp ca*.pem /etc/etcd/ssl/
cp etcd*.pem /etc/etcd/ssl/
cp etcd.conf /etc/etcd/
cp etcd.service /usr/lib/systemd/system/

[root@master-50 work]# for i in master-51 master-52;do rsync -vaz etcd.conf $i:/etc/etcd/;done
[root@master-50 work]# for i in master-51 master-52;do rsync -vaz etcd*.pem ca*.pem $i:/etc/etcd/ssl/;done
[root@master-50 work]# for i in master-51 master-52;do rsync -vaz etcd.service $i:/usr/lib/systemd/system/;done

#Start the cluster (on the master machines)
mkdir -p /var/lib/etcd/default.etcd

#Adjust the config on each remaining master
[root@master-51 work]# cat > /etc/etcd/etcd.conf << 'EOF'
#[Member]
ETCD_NAME="etcd2"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.91.51:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.91.51:2379,http://127.0.0.1:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.91.51:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.91.51:2379"
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.91.50:2380,etcd2=https://192.168.91.51:2380,etcd3=https://192.168.91.52:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

[root@master-52 work]# cat > /etc/etcd/etcd.conf << 'EOF'
#[Member]
ETCD_NAME="etcd3"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.91.52:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.91.52:2379,http://127.0.0.1:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.91.52:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.91.52:2379"
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.91.50:2380,etcd2=https://192.168.91.51:2380,etcd3=https://192.168.91.52:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

#All masters
systemctl daemon-reload
systemctl enable etcd.service
systemctl start etcd.service

systemctl status etcd

#master-50
[root@master-50 work]# export ETCDCTL_API=3
[root@master-50 work]#  /usr/local/bin/etcdctl --write-out=table --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.91.50:2379,https://192.168.91.51:2379,https://192.168.91.52:2379 endpoint health
+----------------------------+--------+-------------+-------+
|          ENDPOINT          | HEALTH |    TOOK     | ERROR |
+----------------------------+--------+-------------+-------+
| https://192.168.91.50:2379 |   true |  8.475827ms |       |
| https://192.168.91.52:2379 |   true | 14.316844ms |       |
| https://192.168.91.51:2379 |   true | 106.94374ms |       |
+----------------------------+--------+-------------+-------+
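
Membership can be double-checked with the same certificate flags:

/usr/local/bin/etcdctl --write-out=table \
  --cacert=/etc/etcd/ssl/ca.pem \
  --cert=/etc/etcd/ssl/etcd.pem \
  --key=/etc/etcd/ssl/etcd-key.pem \
  --endpoints=https://192.168.91.50:2379 member list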

Installing the Kubernetes Components

Download the packages

https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/
#Upload kubernetes-server-linux-amd64.tar.gz to /data/work on master-50:
#Run on master-50, same below
tar zxvf kubernetes-server-linux-amd64.tar.gz
cd kubernetes/server/bin/
cp kube-apiserver kube-controller-manager kube-scheduler kubectl /usr/local/bin/

rsync -vaz kube-apiserver kube-controller-manager kube-scheduler kubectl master-51:/usr/local/bin/
rsync -vaz kube-apiserver kube-controller-manager kube-scheduler kubectl master-52:/usr/local/bin/

scp kubelet kube-proxy node-53:/usr/local/bin/
scp kubelet kube-proxy node-54:/usr/local/bin/
scp kubelet kube-proxy node-55:/usr/local/bin/
cd /data/work/
mkdir -p /etc/kubernetes/
mkdir -p /etc/kubernetes/ssl
mkdir /var/log/kubernetes

apiserver

The TLS bootstrapping mechanism

Once the apiserver has TLS authentication enabled, the kubelet on every node must present a valid certificate signed by the apiserver's CA to communicate with it. When there are many nodes, issuing these client certificates by hand is a lot of work and makes scaling the cluster more complicated.

To simplify the process, Kubernetes introduced TLS bootstrapping: the kubelet starts as a low-privilege user and automatically requests a certificate from the apiserver, which signs the kubelet's certificate dynamically.

Generate the token

[root@master-50 work]# cat > token.csv << EOF
$(head -c 16 /dev/urandom | od -An -t x | tr -d ' '),kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF

#Format: token,username,UID,group
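
The result is a single CSV line; the token itself is random hex, for example:

cat token.csv
#e.g. 0fb61c46f8991b718eb38d27b605b008,kubelet-bootstrap,10001,"system:kubelet-bootstrap"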

Create the CSR request file

Replace the IPs with your own
cat > kube-apiserver-csr.json << 'EOF'
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "192.168.91.50",
    "192.168.91.51",
    "192.168.91.52",
    "192.168.91.53",
    "192.168.91.54",
    "192.168.91.55",
    "192.168.91.199",
    "10.255.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Hubei",
      "L": "Wuhan",
      "O": "k8s",
      "OU": "system"
    }
  ]
}
EOF

If the hosts field is non-empty, it must list every IP or domain name authorized to use the certificate. Since this certificate will be used by the kubernetes master cluster, include the IPs of all master nodes, plus the first IP of the service network (generally the first IP of the service-cluster-ip-range passed to kube-apiserver, here 10.255.0.1).

Generate the apiserver certificate

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-apiserver-csr.json | cfssljson -bare kube-apiserver
#Generates:
kube-apiserver.pem  kube-apiserver-key.pem  kube-apiserver.csr

Create the kube-apiserver config file

cat > kube-apiserver.conf << 'EOF'
KUBE_APISERVER_OPTS="--enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
 --anonymous-auth=false \
 --bind-address=192.168.91.50 \
 --secure-port=6443 \
 --advertise-address=192.168.91.50 \
 --insecure-port=0 \
 --authorization-mode=Node,RBAC \
 --runtime-config=api/all=true \
 --enable-bootstrap-token-auth \
 --service-cluster-ip-range=10.255.0.0/16 \
 --token-auth-file=/etc/kubernetes/token.csv \
 --service-node-port-range=30000-50000 \
 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem \
 --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem  \
 --client-ca-file=/etc/kubernetes/ssl/ca.pem \
 --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem \
 --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem \
 --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
 --service-account-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
 --service-account-issuer=https://kubernetes.default.svc.cluster.local \
 --etcd-cafile=/etc/etcd/ssl/ca.pem \
 --etcd-certfile=/etc/etcd/ssl/etcd.pem \
 --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
 --etcd-servers=https://192.168.91.50:2379,https://192.168.91.51:2379,https://192.168.91.52:2379 \
 --enable-swagger-ui=true \
 --allow-privileged=true \
 --apiserver-count=3 \
 --audit-log-maxage=30 \
 --audit-log-maxbackup=3 \
 --audit-log-maxsize=100 \
 --audit-log-path=/var/log/kube-apiserver-audit.log \
 --event-ttl=1h \
 --alsologtostderr=true \
 --logtostderr=false \
 --log-dir=/var/log/kubernetes \
 --v=4"
EOF

#On the other masters, change bind-address and advertise-address (the etcd servers stay the same)
--logtostderr: log to stderr
--v: log verbosity
--log-dir: log directory
--etcd-servers: etcd cluster endpoints
--bind-address: listen address
--secure-port: https port
--advertise-address: address advertised to the cluster
--allow-privileged: allow privileged containers
--service-cluster-ip-range: Service virtual IP range
--enable-admission-plugins: admission control plugins
--authorization-mode: authorization modes; enables RBAC and Node self-management
--enable-bootstrap-token-auth: enable the TLS bootstrap mechanism
--token-auth-file: bootstrap token file
--service-node-port-range: default NodePort allocation range for Services
--kubelet-client-xxx: client certificate the apiserver uses to reach kubelets
--tls-xxx-file: apiserver https certificates
--etcd-xxxfile: certificates for connecting to the etcd cluster
--audit-log-xxx: audit log settings

Create the systemd unit file

cat > kube-apiserver.service << 'EOF'
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=etcd.service
Wants=etcd.service
[Service]
EnvironmentFile=-/etc/kubernetes/kube-apiserver.conf
ExecStart=/usr/local/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF

Distribute the files

\cp ca*.pem /etc/kubernetes/ssl
\cp kube-apiserver*.pem /etc/kubernetes/ssl/
\cp token.csv /etc/kubernetes/
\cp kube-apiserver.conf /etc/kubernetes/
\cp kube-apiserver.service /usr/lib/systemd/system/
rsync -vaz token.csv master-51:/etc/kubernetes/
rsync -vaz token.csv master-52:/etc/kubernetes/

rsync -vaz kube-apiserver*.pem master-51:/etc/kubernetes/ssl/
rsync -vaz kube-apiserver*.pem master-52:/etc/kubernetes/ssl/

rsync -vaz ca*.pem master-51:/etc/kubernetes/ssl/
rsync -vaz ca*.pem master-52:/etc/kubernetes/ssl/

rsync -vaz kube-apiserver.conf master-51:/etc/kubernetes/
rsync -vaz kube-apiserver.conf master-52:/etc/kubernetes/

rsync -vaz kube-apiserver.service master-51:/usr/lib/systemd/system/
rsync -vaz kube-apiserver.service master-52:/usr/lib/systemd/system/

#Adjust kube-apiserver.conf on master-51 and master-52 respectively

#master-51
sed -i 's#--bind-address=192.168.91.50#--bind-address=192.168.91.51#g'  /etc/kubernetes/kube-apiserver.conf
sed  -i 's#--advertise-address=192.168.91.50#--advertise-address=192.168.91.51#g'  /etc/kubernetes/kube-apiserver.conf

#master-52 
sed -i 's#--bind-address=192.168.91.50#--bind-address=192.168.91.52#g'  /etc/kubernetes/kube-apiserver.conf
sed  -i 's#--advertise-address=192.168.91.50#--advertise-address=192.168.91.52#g'  /etc/kubernetes/kube-apiserver.conf



#All masters
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl start kube-apiserver

systemctl status kube-apiserver
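
A quick listener check per master; since anonymous auth is disabled, an Unauthorized reply here still proves the apiserver is serving TLS:

curl -k https://192.168.91.50:6443/healthz
#expect a 401 Unauthorized response, which confirms the port is up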

kubectl

Create the CSR request file

master-50

cat > admin-csr.json << 'EOF'
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Hubei",
      "L": "Wuhan",
      "O": "system:masters", 
      "OU": "system"
    }
  ] 
}
EOF

Generate the certificate

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin

#Generates:
admin.csr  admin-key.pem  admin.pem

cp admin*.pem /etc/kubernetes/ssl/

Create the kubeconfig file

1. Set the cluster parameters
[root@master-50 work]# kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.91.50:6443 --kubeconfig=kube.config

#Inspect
cat kube.config
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUR0akNDQXA2Z0F3SUJBZ0lVZko1YW1RdTVQTlJkYW1kUnhDQWFhVElTYmhZd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1lURUxNQWtHQTFVRUJoTUNRMDR4RGpBTUJnTlZCQWdUQlVoMVltVnBNUTR3REFZRFZRUUhFd1ZYZFdoaApiakVNTUFvR0ExVUVDaE1EYXpoek1ROHdEUVlEVlFRTEV3WnplWE4wWlcweEV6QVJCZ05WQkFNVENtdDFZbVZ5CmJtVjBaWE13SGhjTk1qSXdNekUxTURZMU16QXdXaGNOTXpJd016RXlNRFkxTXpBd1dqQmhNUXN3Q1FZRFZRUUcKRXdKRFRqRU9NQXdHQTFVRUNCTUZTSFZpWldreERqQU1CZ05WQkFjVEJWZDFhR0Z1TVF3d0NnWURWUVFLRXdOcgpPSE14RHpBTkJnTlZCQXNUQm5ONWMzUmxiVEVUTUJFR0ExVUVBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKCktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQU00Q3NnMXluQ3dJU0dLaGhvS2kzUFBsNndkZEFWa3oKTExGbzBreVg3bHhiaE1nbTJrNkRuME5rajFVWUl0VXNBcC80UTBaRm1odmpTVWNrVHNXQUxVb2M4MHpsR3duTgpsWmtidFVMbnJPUmZXZHBGOTRsd0lNcDFlRDF6aDdOVFNMKzZTQU1QQzVtck9iWmpjZWpRQmlGdk1JWnZPalFRCnNsMmkyT08zS3lTcmFBcHFzUTlMTVRDKy9EdUNOcUIyMnNlZU9mTTNhQnQxaFZLLzdHK29rSGpyZjZzQWVuK3cKRGdoemRZTjVPZ2RDdGhDZER1OWlrSTRGOGVsM0F0TUlMUlQ2Sk1wOGI0d2MxTnVrdEFvYlp1eFZMaTdkcHRqMwp2aFNvRS9Sa3ZnRjJYZUhGdUNMd05rQ3FxMjQrbnhIekw3K2laUTI5OWVNQlV2cGZ0RTY4aHNFQ0F3RUFBYU5tCk1HUXdEZ1lEVlIwUEFRSC9CQVFEQWdFR01CSUdBMVVkRXdFQi93UUlNQVlCQWY4Q0FRSXdIUVlEVlIwT0JCWUUKRkgrM3czWkhjeGtHL2N3VEFrZG9EZ05WVlFqVU1COEdBMVVkSXdRWU1CYUFGSCszdzNaSGN4a0cvY3dUQWtkbwpEZ05WVlFqVU1BMEdDU3FHU0liM0RRRUJDd1VBQTRJQkFRQkM0Y1d1cUZZWVk3S2hnYlJzUjhWenRmL2dvbU9rClBwdkozbTZBSWVMUHVDalVsclRTd3Y4dmFTdStmMGNzQW5VYXREUXpSV2JjYUptSG9YL21nMFdocnFnNXpHVkgKV3AyYjRDVHZ5YmdiczkrTGowTlFwa3JJemlIbUZ4YlJWSzNWeXdpd2VZNkJNaXE5NU9hN0VNYnVmdE1OUnRHSgpocVBja005YVZ6K2tISDdzWUUrUEdZU3lGeVJWblVlNUp2Q2JsMlNLMU1LNlM4ZHpCbmMwa1BFMXBRRitxRnpyCmhaUEJlb3ZENFYxZ3Q3R3BiYkpST1h6bHgwdlYybEUrVWlabmhBeWVibi9DQTBHdVdtM2NiOXdDdTlnaENzZUsKb05jWTBHWnJjT1IzMjhSZkFsbkRTRTQrMXRXbWxlM1ZaRE5PcTlIazFZZys5TG45MVhtMVJCMUQKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
    server: https://192.168.91.50:6443
  name: kubernetes
contexts: null
current-context: ""
kind: Config
preferences: {}
users: null

2. Set the client credentials
kubectl config set-credentials admin --client-certificate=admin.pem --client-key=admin-key.pem --embed-certs=true --kubeconfig=kube.config

3. Set the context
kubectl config set-context kubernetes --cluster=kubernetes --user=admin --kubeconfig=kube.config

4. Switch to the context
kubectl config use-context kubernetes --kubeconfig=kube.config
mkdir ~/.kube -p
cp kube.config ~/.kube/config

5. Grant the kubernetes user access to the kubelet API
kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes

Check cluster component status

[root@master-50 work]# kubectl cluster-info
Kubernetes control plane is running at https://192.168.91.50:6443

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
 
[root@master-50 work]# kubectl get componentstatuses
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS      MESSAGE                                                                                       ERROR
controller-manager   Unhealthy   Get "http://127.0.0.1:10252/healthz": dial tcp 127.0.0.1:10252: connect: connection refused   
scheduler            Unhealthy   Get "http://127.0.0.1:10251/healthz": dial tcp 127.0.0.1:10251: connect: connection refused   
etcd-0               Healthy     {"health":"true"}                                                                             
etcd-2               Healthy     {"health":"true"}                                                                             
etcd-1               Healthy     {"health":"true"}
#controller-manager and scheduler show Unhealthy because they have not been deployed yet; this is expected at this stage.
  
[root@master-50 work]# kubectl get all --all-namespaces
NAMESPACE   NAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
default     service/kubernetes   ClusterIP   10.255.0.1   <none>        443/TCP   4h23m

Sync the kubectl config to the other masters

#master-51, master-52
mkdir /root/.kube/

#master-50
rsync -vaz /root/.kube/config master-51:/root/.kube/
rsync -vaz /root/.kube/config master-52:/root/.kube/

Configure kubectl command completion

yum install -y bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
kubectl completion bash > ~/.kube/completion.bash.inc
source /root/.kube/completion.bash.inc
source $HOME/.bash_profile
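
To make completion survive new logins, the same two lines can be appended to ~/.bashrc:

echo 'source /usr/share/bash-completion/bash_completion' >> ~/.bashrc
echo 'source <(kubectl completion bash)' >> ~/.bashrc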

kube-controller-manager

Create the CSR request file

cat > kube-controller-manager-csr.json << 'EOF'
{
  "CN": "system:kube-controller-manager",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "hosts": [
    "127.0.0.1",
    "192.168.91.50",
    "192.168.91.51",
    "192.168.91.52",
    "192.168.91.199"
  ],
  "names": [
    {
      "C": "CN",
      "ST": "Hubei",
      "L": "Wuhan",
      "O": "system:kube-controller-manager",
      "OU": "system"
    }
  ]
}
EOF

Note: the hosts list contains the IPs of all kube-controller-manager nodes. CN is system:kube-controller-manager and O is system:kube-controller-manager; the built-in ClusterRoleBinding system:kube-controller-manager grants the component the permissions it needs.

Generate the certificate

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager

#Generates:
kube-controller-manager.csr  kube-controller-manager-key.pem  kube-controller-manager.pem

Create the kube-controller-manager kubeconfig

#1. Set the cluster parameters
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.91.50:6443 --kubeconfig=kube-controller-manager.kubeconfig

#2. Set the client credentials

kubectl config set-credentials system:kube-controller-manager --client-certificate=kube-controller-manager.pem --client-key=kube-controller-manager-key.pem --embed-certs=true --kubeconfig=kube-controller-manager.kubeconfig

#3. Set the context

kubectl config set-context system:kube-controller-manager --cluster=kubernetes --user=system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig

#4. Switch to the context

kubectl config use-context system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig

Create the config file kube-controller-manager.conf

cat > kube-controller-manager.conf << 'EOF'
KUBE_CONTROLLER_MANAGER_OPTS="--port=0 \
 --secure-port=10252 \
 --bind-address=127.0.0.1 \
 --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
 --service-cluster-ip-range=10.255.0.0/16 \
 --cluster-name=kubernetes \
 --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
 --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
 --allocate-node-cidrs=true \
 --cluster-cidr=10.0.0.0/16 \
 --experimental-cluster-signing-duration=87600h \
 --root-ca-file=/etc/kubernetes/ssl/ca.pem \
 --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \
 --leader-elect=true \
 --feature-gates=RotateKubeletServerCertificate=true \
 --controllers=*,bootstrapsigner,tokencleaner \
 --horizontal-pod-autoscaler-use-rest-clients=true \
 --horizontal-pod-autoscaler-sync-period=10s \
 --tls-cert-file=/etc/kubernetes/ssl/kube-controller-manager.pem \
 --tls-private-key-file=/etc/kubernetes/ssl/kube-controller-manager-key.pem \
 --use-service-account-credentials=true \
 --alsologtostderr=true \
 --logtostderr=false \
 --log-dir=/var/log/kubernetes \
 --v=2"
EOF

Create the systemd unit file

cat > kube-controller-manager.service << 'EOF'
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/etc/kubernetes/kube-controller-manager.conf
ExecStart=/usr/local/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF

Start the service

\cp kube-controller-manager*.pem /etc/kubernetes/ssl/
\cp kube-controller-manager.kubeconfig /etc/kubernetes/
\cp kube-controller-manager.conf /etc/kubernetes/
\cp kube-controller-manager.service /usr/lib/systemd/system/
rsync -vaz kube-controller-manager*.pem master-51:/etc/kubernetes/ssl/
rsync -vaz kube-controller-manager*.pem master-52:/etc/kubernetes/ssl/
rsync -vaz kube-controller-manager.kubeconfig kube-controller-manager.conf master-51:/etc/kubernetes/
rsync -vaz kube-controller-manager.kubeconfig kube-controller-manager.conf master-52:/etc/kubernetes/
rsync -vaz kube-controller-manager.service master-51:/usr/lib/systemd/system/
rsync -vaz kube-controller-manager.service master-52:/usr/lib/systemd/system/

#All masters
systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl start kube-controller-manager
systemctl status kube-controller-manager

kube-scheduler

cat > kube-scheduler-csr.json << 'EOF'
{
  "CN": "system:kube-scheduler",
  "hosts": [
    "127.0.0.1",
    "192.168.91.50",
    "192.168.91.51",
    "192.168.91.52",
    "192.168.91.199"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Hubei",
      "L": "Wuhan",
      "O": "system:kube-scheduler",
      "OU": "system"
    }
  ] 
}
EOF

Generate the certificate

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler

#Generates:
kube-scheduler.csr  kube-scheduler-key.pem  kube-scheduler.pem

Create the kube-scheduler kubeconfig

#1. Set the cluster parameters
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.91.50:6443 --kubeconfig=kube-scheduler.kubeconfig
#2. Set the client credentials
kubectl config set-credentials system:kube-scheduler --client-certificate=kube-scheduler.pem --client-key=kube-scheduler-key.pem --embed-certs=true --kubeconfig=kube-scheduler.kubeconfig
#3. Set the context
kubectl config set-context system:kube-scheduler --cluster=kubernetes --user=system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig
#4. Switch to the context
kubectl config use-context system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig

Create the config file kube-scheduler.conf

cat > kube-scheduler.conf << 'EOF'
KUBE_SCHEDULER_OPTS="--address=127.0.0.1 \
--kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \
--leader-elect=true \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/var/log/kubernetes \
--v=2"
EOF

Create the systemd unit file

cat > kube-scheduler.service << 'EOF'
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/etc/kubernetes/kube-scheduler.conf
ExecStart=/usr/local/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF

Start the service

\cp kube-scheduler*.pem /etc/kubernetes/ssl/
\cp kube-scheduler.kubeconfig /etc/kubernetes/
\cp kube-scheduler.conf /etc/kubernetes/
\cp kube-scheduler.service /usr/lib/systemd/system/
rsync -vaz kube-scheduler*.pem master-51:/etc/kubernetes/ssl/
rsync -vaz kube-scheduler*.pem master-52:/etc/kubernetes/ssl/
rsync -vaz kube-scheduler.kubeconfig kube-scheduler.conf master-51:/etc/kubernetes/
rsync -vaz kube-scheduler.kubeconfig kube-scheduler.conf master-52:/etc/kubernetes/
rsync -vaz kube-scheduler.service master-51:/usr/lib/systemd/system/
rsync -vaz kube-scheduler.service master-52:/usr/lib/systemd/system/

#All masters
systemctl daemon-reload
systemctl enable kube-scheduler
systemctl start kube-scheduler
systemctl status kube-scheduler 

Load the offline image archives

#Upload pause-cordns.tar.gz to the node machines, then load it manually
docker load -i pause-cordns.tar.gz

The kubelet component

Run the following on master-50

Create kubelet-bootstrap.kubeconfig

cd /data/work/
BOOTSTRAP_TOKEN=$(awk -F "," '{print $1}' /etc/kubernetes/token.csv)

kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.91.50:6443 --kubeconfig=kubelet-bootstrap.kubeconfig

kubectl config set-credentials kubelet-bootstrap --token=${BOOTSTRAP_TOKEN} --kubeconfig=kubelet-bootstrap.kubeconfig

kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=kubelet-bootstrap.kubeconfig
#Generates:
kubelet-bootstrap.kubeconfig

kubectl config use-context default --kubeconfig=kubelet-bootstrap.kubeconfig

kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap

Create the config file kubelet.json

#"cgroupDriver": "systemd"要和 docker 的驱动一致。
docker info查看启动类型
#address 替换为自己 node 的 IP 地址。

[root@master-50 work]# cat > kubelet.json << 'EOF'
{
   "kind": "KubeletConfiguration",
   "apiVersion": "kubelet.config.k8s.io/v1beta1",
   "authentication": {
    "x509": {
     "clientCAFile": "/etc/kubernetes/ssl/ca.pem"
   },
   "webhook": {
     "enabled": true,
     "cacheTTL": "2m0s"
   },
   "anonymous": {
     "enabled": false
     }
   },
   "authorization": {
     "mode": "Webhook",
     "webhook": {
       "cacheAuthorizedTTL": "5m0s",
       "cacheUnauthorizedTTL": "30s"
     }
   },
   "address": "192.168.91.53",
   "port": 10250,
   "readOnlyPort": 10255,
   "cgroupDriver": "systemd",
   "hairpinMode": "promiscuous-bridge",
   "serializeImagePulls": false,
   "featureGates": {
     "RotateKubeletClientCertificate": true,
     "RotateKubeletServerCertificate": true
   },
   "clusterDomain": "cluster.local.",
   "clusterDNS": ["10.255.0.2"]
}
EOF

#Note: change address in kubelet.json to each node's own IP, then start the service on each worker node

cat > kubelet.service << 'EOF'
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service
[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/usr/local/bin/kubelet \
 --bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig \
 --cert-dir=/etc/kubernetes/ssl \
 --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
 --config=/etc/kubernetes/kubelet.json \
 --network-plugin=cni \
 --pod-infra-container-image=k8s.gcr.io/pause:3.2 \
 --alsologtostderr=true \
 --logtostderr=false \
 --log-dir=/var/log/kubernetes \
 --v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF

#Notes:
--hostname-override: display name, unique within the cluster
--network-plugin: enable CNI
--kubeconfig: empty path; generated automatically on first start and then used to connect to the apiserver
--bootstrap-kubeconfig: used on first start to request a certificate from the apiserver
--config: configuration file
--cert-dir: directory for generated kubelet certificates
--pod-infra-container-image: image for the pod infrastructure (pause) container

Distribute and start

#All nodes
mkdir /etc/kubernetes/ssl -p
#master-50
scp kubelet-bootstrap.kubeconfig kubelet.json node-53:/etc/kubernetes/
scp kubelet-bootstrap.kubeconfig kubelet.json node-54:/etc/kubernetes/
scp kubelet-bootstrap.kubeconfig kubelet.json node-55:/etc/kubernetes/

scp ca.pem node-53:/etc/kubernetes/ssl/
scp ca.pem node-54:/etc/kubernetes/ssl/
scp ca.pem node-55:/etc/kubernetes/ssl/

scp kubelet.service node-53:/usr/lib/systemd/system/
scp kubelet.service node-54:/usr/lib/systemd/system/
scp kubelet.service node-55:/usr/lib/systemd/system/


#node-54,node-55
[root@node-54 ~]# sed -i 's/192.168.91.53/192.168.91.54/g' /etc/kubernetes/kubelet.json
[root@node-55 ~]# sed -i 's/192.168.91.53/192.168.91.55/g' /etc/kubernetes/kubelet.json

#All nodes
#Start the service
mkdir /var/lib/kubelet 
mkdir /var/log/kubernetes
systemctl daemon-reload
systemctl enable kubelet
systemctl start kubelet
systemctl status kubelet

Run the following to see the CSR requests submitted by the worker nodes:

[root@master-50 work]# kubectl get csr
NAME                                                   AGE     SIGNERNAME                                    REQUESTOR           CONDITION
node-csr-8D9oM8HOqk3-RODGZ_G7sLjurLXVqgXNXJCd7NQA9Gs   2m1s    kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Pending
node-csr-GaCvP4bcS9JJZbp42QjlGm308Ii0VxK30a1DwP9pum8   2m15s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Pending
node-csr-THFZWqCfQuApPhVxxoA_A1HccWlhfmw377HWFqAIM3s   2m3s    kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Pending


#Approve them
[root@master-50 work]# kubectl certificate approve node-csr-8D9oM8HOqk3-RODGZ_G7sLjurLXVqgXNXJCd7NQA9Gs  node-csr-GaCvP4bcS9JJZbp42QjlGm308Ii0VxK30a1DwP9pum8  node-csr-THFZWqCfQuApPhVxxoA_A1HccWlhfmw377HWFqAIM3s
certificatesigningrequest.certificates.k8s.io/node-csr-8D9oM8HOqk3-RODGZ_G7sLjurLXVqgXNXJCd7NQA9Gs approved
certificatesigningrequest.certificates.k8s.io/node-csr-GaCvP4bcS9JJZbp42QjlGm308Ii0VxK30a1DwP9pum8 approved
certificatesigningrequest.certificates.k8s.io/node-csr-THFZWqCfQuApPhVxxoA_A1HccWlhfmw377HWFqAIM3s approved

[root@master-50 work]#  kubectl get nodes
NAME      STATUS     ROLES    AGE   VERSION
node-53   NotReady   <none>   12s   v1.20.7
node-54   NotReady   <none>   12s   v1.20.7
node-55   NotReady   <none>   12s   v1.20.7

The kube-proxy component

Create the CSR request

cat > kube-proxy-csr.json << 'EOF'
{
  "CN": "system:kube-proxy",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Hubei",
      "L": "Wuhan",
      "O": "k8s",
      "OU": "system"
    }
  ] 
}
EOF
#Generate the certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

#Create the kubeconfig file
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.91.50:6443 --kubeconfig=kube-proxy.kubeconfig

kubectl config set-credentials kube-proxy --client-certificate=kube-proxy.pem --client-key=kube-proxy-key.pem --embed-certs=true --kubeconfig=kube-proxy.kubeconfig

kubectl config set-context default --cluster=kubernetes --user=kube-proxy --kubeconfig=kube-proxy.kubeconfig

kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

#Generates:
kube-proxy.kubeconfig  kube-proxy.pem  kube-proxy-key.pem  kube-proxy.csr

Create the kube-proxy config file

cat > kube-proxy.yaml << 'EOF'
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 192.168.91.53
clientConnection:
 kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
clusterCIDR: 192.168.91.0/24
healthzBindAddress: 192.168.91.53:10256
kind: KubeProxyConfiguration
metricsBindAddress: 192.168.91.53:10249
mode: "ipvs"
EOF

Create the systemd unit file

cat > kube-proxy.service << 'EOF'
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/usr/local/bin/kube-proxy \
 --config=/etc/kubernetes/kube-proxy.yaml \
 --alsologtostderr=true \
 --logtostderr=false \
 --log-dir=/var/log/kubernetes \
 --v=2
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
scp kube-proxy.kubeconfig kube-proxy.yaml node-53:/etc/kubernetes/
scp kube-proxy.service node-53:/usr/lib/systemd/system/

scp kube-proxy.kubeconfig kube-proxy.yaml node-54:/etc/kubernetes/
scp kube-proxy.service node-54:/usr/lib/systemd/system/

scp kube-proxy.kubeconfig kube-proxy.yaml node-55:/etc/kubernetes/
scp kube-proxy.service node-55:/usr/lib/systemd/system/

#Adjust the config
#node-54
sed -i 's#192.168.91.53#192.168.91.54#g' /etc/kubernetes/kube-proxy.yaml

#node-55
sed -i 's#192.168.91.53#192.168.91.55#g' /etc/kubernetes/kube-proxy.yaml

#Start the service on the nodes
mkdir -p /var/lib/kube-proxy
systemctl daemon-reload
systemctl enable kube-proxy
systemctl start kube-proxy
systemctl status kube-proxy
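
Because mode is "ipvs", the generated rules can be verified on any node with ipvsadm (installed with the base packages earlier):

ipvsadm -Ln
#expect a virtual server for 10.255.0.1:443 forwarding to the three apiservers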

The calico component

#Load the offline image archives
#Upload cni.tar.gz and node.tar.gz to the node machines, then load them manually
docker load -i cni.tar.gz
docker load -i node.tar.gz

#Upload calico.yaml to /data/work on master-50
[root@master-50 work]#  kubectl apply -f calico.yaml
[root@master-50 work]#  kubectl get pods -n kube-system
NAME                READY   STATUS    RESTARTS   AGE
calico-node-cl2xv   1/1     Running   0          33s
calico-node-dlrd8   1/1     Running   0          33s
calico-node-kc2j6   1/1     Running   0          33s
[root@master-50 work]# kubectl get nodes
NAME      STATUS   ROLES    AGE   VERSION
node-53   Ready    <none>   42m   v1.20.7
node-54   Ready    <none>   42m   v1.20.7
node-55   Ready    <none>   42m   v1.20.7

The coredns component

[root@master-50 work]# kubectl apply -f coredns.yaml
[root@master-50 work]#  kubectl get pods -n kube-system
NAME                       READY   STATUS    RESTARTS   AGE
calico-node-cl2xv          1/1     Running   0          2m8s
calico-node-dlrd8          1/1     Running   0          2m8s
calico-node-kc2j6          1/1     Running   0          2m8s
coredns-7bf4bd64bd-x9bgs   1/1     Running   0          25s
[root@master-50 work]# kubectl get svc -n kube-system
NAME       TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                  AGE
kube-dns   ClusterIP   10.255.0.2   <none>        53/UDP,53/TCP,9153/TCP   42s
[root@master-50 work]# kubectl get nodes
NAME      STATUS   ROLES    AGE   VERSION
node-53   Ready    <none>   44m   v1.20.7
node-54   Ready    <none>   44m   v1.20.7
node-55   Ready    <none>   44m   v1.20.7

Deploy a tomcat Service

#Upload tomcat.tar.gz and busybox-1-28.tar.gz to the nodes, then load them manually
docker load -i tomcat.tar.gz
docker load -i busybox-1-28.tar.gz
#tomcat.yaml
apiVersion: v1  #the Pod resource lives in the core v1 API group
kind: Pod  #we are creating a Pod
metadata:  #metadata
  name: demo-pod  #pod name
  namespace: default  #namespace the pod belongs to
  labels:
    app: myapp  #labels on the pod
    env: dev
spec:
  containers:      #containers is a list of objects; multiple name entries are allowed
  - name: tomcat-pod-java  #container name
    ports:
    - containerPort: 8080
    image: tomcat:8.5-jre8-alpine   #image the container runs
    imagePullPolicy: IfNotPresent
  - name: busybox
    image: busybox:latest
    command:  #command is a list; each argument below is prefixed with a dash
    - "/bin/sh"
    - "-c"
    - "sleep 3600"
#tomcat-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: tomcat
spec:
  type: NodePort
  ports:
    - port: 8080    #Service port inside the cluster
      nodePort: 30080   #port exposed on each node
  selector:
    app: myapp
    env: dev
[root@master-50 work]# kubectl apply -f tomcat.yaml
[root@master-50 work]# kubectl apply -f tomcat-service.yaml

[root@master-50 work]# kubectl  get pods -o wide
NAME       READY   STATUS    RESTARTS   AGE   IP         NODE      NOMINATED NODE   READINESS GATES
demo-pod   2/2     Running   0          69s   10.0.2.2   node-54   <none>           <none>

[root@master-50 work]# kubectl get svc -A
NAMESPACE     NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                  AGE
default       kubernetes   ClusterIP   10.255.0.1      <none>        443/TCP                  9h
default       tomcat       NodePort    10.255.192.22   <none>        8080:30080/TCP           6s
kube-system   kube-dns     ClusterIP   10.255.0.2      <none>        53/UDP,53/TCP,9153/TCP   8h

#curl node_ip:30080

Verify coredns is working

[root@master-50 work]#  kubectl run busybox --image busybox:1.28 --restart=Never --rm -it busybox -- sh
If you don't see a command prompt, try pressing enter.
/ # ping qq.com
PING qq.com (183.3.226.35): 56 data bytes
64 bytes from 183.3.226.35: seq=0 ttl=127 time=5.519 ms
64 bytes from 183.3.226.35: seq=1 ttl=127 time=5.643 ms
^C
--- qq.com ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max = 5.519/5.581/5.643 ms
#The above shows the pod can reach the network
/ # nslookup kubernetes.default.svc.cluster.local
Server:    10.255.0.2
Address 1: 10.255.0.2 kube-dns.kube-system.svc.cluster.local

Name:      kubernetes.default.svc.cluster.local
Address 1: 10.255.0.1 kubernetes.default.svc.cluster.local
/ # nslookup tomcat.default.svc.cluster.local
Server:    10.255.0.2
Address 1: 10.255.0.2 kube-dns.kube-system.svc.cluster.local

Name:      tomcat.default.svc.cluster.local
Address 1: 10.255.192.22 tomcat.default.svc.cluster.local
/ # exit
pod "busybox" deleted

#Note:
busybox must be the pinned 1.28 image; with the latest version, nslookup fails to resolve the DNS names and IPs.

10.255.0.2 is the coreDNS clusterIP, which confirms coreDNS is configured correctly.
Internal Service names are resolved through coreDNS.

apiserver High Availability

Install nginx and keepalived

#All masters
yum install nginx keepalived nginx-mod-stream -y

nginx

#All masters
cat > /etc/nginx/nginx.conf << 'EOF'
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;

include /usr/share/nginx/modules/*.conf;

events {
  worker_connections 1024;
}

# L4 load balancing for the master apiserver instances
stream {

  log_format main '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
   access_log /var/log/nginx/k8s-access.log main;

   upstream k8s-apiserver {
     server 192.168.91.50:6443; # master-50 APISERVER IP:PORT
     server 192.168.91.51:6443; # master-51 APISERVER IP:PORT
     server 192.168.91.52:6443; # master-52 APISERVER IP:PORT
   }
 
    server {
      listen 16443; # nginx shares the master nodes with the apiserver, so it must not listen on 6443 or the ports would conflict
      proxy_pass k8s-apiserver;
    } 
  }

  http {
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    access_log /var/log/nginx/access.log main;

    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
  keepalive_timeout 65;
  types_hash_max_size 2048;

  include /etc/nginx/mime.types;
  default_type application/octet-stream;

  server {
    listen 80 default_server;
    server_name _;
    location / {
    }
  }
}
EOF

keepalived

#Primary keepalived (master-50)
cat > /etc/keepalived/keepalived.conf << 'EOF'
global_defs { 
    notification_email { 
      acassen@firewall.loc 
      failover@firewall.loc 
      sysadmin@firewall.loc 
    } 
    notification_email_from Alexandre.Cassen@firewall.loc 
    smtp_server 127.0.0.1 
    smtp_connect_timeout 30
    router_id NGINX_MASTER
} 

vrrp_script check_nginx {
    script "/etc/keepalived/check_nginx.sh"
}

vrrp_instance VI_1 { 
    state MASTER 
    interface ens33 # change to the actual NIC name
    virtual_router_id 51 # VRRP router ID; unique per instance
    priority 100 # priority; set to 90 on the backups
    advert_int 1 # VRRP advertisement interval, default 1 second
    authentication { 
      auth_type PASS 
      auth_pass 1111 
    } 
    # Virtual IP
    virtual_ipaddress { 
        192.168.91.199/24
    } 
    track_script {
        check_nginx
    } 
}
EOF
#vrrp_script: script that checks nginx health (used to decide failover)
#virtual_ipaddress: the virtual IP (VIP)

cat > /etc/keepalived/check_nginx.sh << 'EOF'
#!/bin/bash
count=$(ps -ef |grep nginx | grep sbin | egrep -cv "grep|$$")
if [ "$count" -eq 0 ];then
    systemctl stop keepalived
fi
EOF

chmod +x /etc/keepalived/check_nginx.sh



#Backup keepalived (master-51 and master-52)
cat > /etc/keepalived/keepalived.conf << 'EOF'
global_defs { 
    notification_email { 
      acassen@firewall.loc 
      failover@firewall.loc 
      sysadmin@firewall.loc 
    } 
    notification_email_from Alexandre.Cassen@firewall.loc 
    smtp_server 127.0.0.1 
    smtp_connect_timeout 30
    router_id NGINX_BACKUP
} 

vrrp_script check_nginx {
    script "/etc/keepalived/check_nginx.sh"
}

vrrp_instance VI_1 { 
    state BACKUP
    interface ens33 # change to the actual NIC name
    virtual_router_id 51 # VRRP router ID; unique per instance
    priority 90 # priority; lower than the primary's 100
    advert_int 1 # VRRP advertisement interval, default 1 second
    authentication { 
      auth_type PASS 
      auth_pass 1111 
    } 
    # Virtual IP
    virtual_ipaddress { 
        192.168.91.199/24
    } 
    track_script {
        check_nginx
    } 
}
EOF


cat > /etc/keepalived/check_nginx.sh << 'EOF'
#!/bin/bash
count=$(ps -ef |grep nginx | grep sbin | egrep -cv "grep|$$")
if [ "$count" -eq 0 ];then
    systemctl stop keepalived
fi
EOF
chmod +x /etc/keepalived/check_nginx.sh
#Note: keepalived decides failover from the script's exit code (0 = healthy, non-zero = failed).

Start

systemctl daemon-reload
systemctl start nginx
systemctl enable nginx
systemctl enable keepalived.service
systemctl start keepalived.service
systemctl status keepalived.service
systemctl status nginx

systemctl restart keepalived.service
systemctl status keepalived.service

VIP failover test

[root@master-50 ~]# ip a | grep 199
    inet 192.168.91.199/24 scope global ens33
[root@master-50 ~]# systemctl stop nginx.service 
[root@master-50 ~]# ip a | grep 199

[root@master-51 keepalived]# ip a | grep 199
    inet 192.168.91.199/24 scope global ens33
[root@master-51 keepalived]# systemctl stop nginx.service 

[root@master-52 keepalived]# ip a | grep 199
    inet 192.168.91.199/24 scope global ens33

#Remember to bring the two stopped nginx services back up, for example:
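
#On master-50 and master-51 (check_nginx.sh also stopped keepalived on those hosts)
systemctl start nginx
systemctl start keepalived.service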

The content above is based on course material by instructor Xian Chao (先超) at 51CTO.

Troubleshooting

Mar 16 20:17:41 node-53 kubelet: F0316 20:17:41.024366   60098 server.go:269] failed to run Kubelet: cannot create certificate signing request: certificatesigningrequests.certificates.k8s.io is forbidden: User "kubelebootstrap" cannot create resource "certificatesigningrequests" in API group "certificates.k8s.io" at the cluster scope


While installing the kubelet on the nodes, the error above kept the kubelet from starting. Search results for the error all pointed to a missing clusterrolebinding, yet
kubectl get clusterrolebinding showed the binding for the user did exist. After a long time, a careful re-read of the error:

 User "kubelebootstrap" cannot

showed the user name had been copied with a few letters missing, so the generated token was wrong. This was the token.csv at the time:
cat > token.csv << EOF
$(head -c 16 /dev/urandom | od -An -t x | tr -d ' '),kubelebootstrap,10001,"system:kubelet-bootstrap"
EOF
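
A sketch of the recovery, reusing the paths and commands from the earlier sections: regenerate token.csv with the correct user name, redistribute it, rebuild the bootstrap kubeconfig, and restart the affected services:

cd /data/work
cat > token.csv << EOF
$(head -c 16 /dev/urandom | od -An -t x | tr -d ' '),kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
\cp token.csv /etc/kubernetes/
for i in master-51 master-52; do rsync -vaz token.csv $i:/etc/kubernetes/; done
#rebuild the bootstrap kubeconfig with the corrected token
BOOTSTRAP_TOKEN=$(awk -F "," '{print $1}' /etc/kubernetes/token.csv)
kubectl config set-credentials kubelet-bootstrap --token=${BOOTSTRAP_TOKEN} --kubeconfig=kubelet-bootstrap.kubeconfig
for i in node-53 node-54 node-55; do scp kubelet-bootstrap.kubeconfig $i:/etc/kubernetes/; done
#then restart kube-apiserver on the masters and kubelet on the nodes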