I. Cluster Planning

Hostname                     IP             Components
k8s-master1                  10.0.19.127    kube-apiserver, kube-controller-manager, kube-scheduler, etcd
k8s-master2                  10.0.19.128    kube-apiserver, kube-controller-manager, kube-scheduler
k8s-node1                    10.0.19.129    kube-proxy, kubelet, docker, etcd
k8s-node2                    10.0.19.130    kube-proxy, kubelet, docker, etcd
k8s-loadbalancer1 (MASTER)   10.0.19.131    nginx + keepalived    (VIP: 10.0.19.133)
k8s-loadbalancer2 (BACKUP)   10.0.19.132    nginx + keepalived

OS version: CentOS 7.3

Architecture (diagrams omitted): nginx + keepalived hold the VIP 10.0.19.133 and load-balance the two kube-apiservers; etcd runs as a three-member cluster on master1, node1, and node2.

II. Initialize the Servers

1. Set the firewall to iptables and flush the rules
[run on all nodes]

[root@k8s-master1 ~]# systemctl stop firewalld
[root@k8s-master1 ~]# systemctl disable firewalld
[root@k8s-master1 ~]# yum -y install iptables-services ipvsadm &&  systemctl  start iptables  &&  systemctl  enable iptables  &&  iptables -F  &&  service iptables save

2. Disable SELinux
[run on all nodes]

# setenforce 0
# vim /etc/selinux/config
	change SELINUX=enforcing to SELINUX=disabled

3. Set the hostname
[run on all nodes]

hostnamectl set-hostname <hostname>
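For example, on the first master:

hostnamectl set-hostname k8s-master1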

4. Configure name resolution
[run on all nodes]

# vi /etc/hosts
Add the following six lines:
10.0.19.127    k8s-master1
10.0.19.128    k8s-master2
10.0.19.129    k8s-node1
10.0.19.130    k8s-node2
10.0.19.131    k8s-loadbalancer1
10.0.19.132    k8s-loadbalancer2
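To avoid editing every machine by hand, a quick sketch that pushes the same file from master1 to the rest of the cluster (assumes root SSH access to each node):

for h in 10.0.19.128 10.0.19.129 10.0.19.130 10.0.19.131 10.0.19.132; do
    scp /etc/hosts root@$h:/etc/hosts
done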

5. Configure time synchronization
Pick one node as the time server and make the rest clients.
master1 is the time server;
the other nodes are its clients.

1) Configure k8s-master1

# yum install chrony -y
# vim /etc/chrony.conf
Modify/add three lines (the `local stratum 10` directive is what lets chrony serve time even without a reachable upstream source):
	server 127.127.1.0 iburst
	allow 10.0.19.0/24
	local stratum 10
# systemctl start chronyd
# systemctl enable chronyd
# ss -unl | grep 123
UNCONN     0      0            *:123                      *:*   

2) Configure the other nodes

# yum install chrony -y
# vim /etc/chrony.conf
	server 10.0.19.127 iburst
# systemctl start chronyd
# systemctl enable chronyd
# chronyc sources
	210 Number of sources = 1
	MS Name/IP address         Stratum Poll Reach LastRx Last sample               
	===============================================================================
	^* k8s-master1                  10   6    17     4    +11us[  +79us] +/-   95us

6. Disable swap
[run on all nodes]

[root@k8s-master1 ~]# swapoff -a
[root@k8s-master1 ~]# vim /etc/fstab
Delete (or comment out) the line that mounts swap (on a default CentOS install it is the centos-swap entry)

Check that swap is off:
[root@k8s-master1 ~]# free -m
              total        used        free      shared  buff/cache   available
Mem:           2827         157        2288           9         380        2514
Swap:             0           0           0
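Instead of deleting the fstab line by hand, the same result can be had with a one-liner that comments out any line mentioning swap (a sketch; review /etc/fstab afterwards):

sed -ri 's/.*swap.*/#&/' /etc/fstab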

7. Tune kernel parameters for Kubernetes
[run on all nodes]

cat > kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
EOF
cp kubernetes.conf  /etc/sysctl.d/kubernetes.conf
sysctl -p /etc/sysctl.d/kubernetes.conf

If sysctl complains about the net.bridge.bridge-nf-call-* keys, don't worry: the br_netfilter bridge module isn't loaded yet; we enable it in step 11. (Screenshot omitted.)

8. Disable unneeded services and upgrade systemd

yum -y upgrade systemd  # update systemd from the CentOS repos, since mine was old
systemctl stop postfix && systemctl disable postfix

9. Configure rsyslogd and systemd-journald
[run on all nodes]

mkdir /var/log/journal # directory for persistent log storage
mkdir /etc/systemd/journald.conf.d
cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
[Journal]
# Persist logs to disk
Storage=persistent

# Compress archived logs
Compress=yes

SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000

# Cap total disk usage at 10G
SystemMaxUse=10G

# Cap individual journal files at 200M
SystemMaxFileSize=200M

# Keep logs for 2 weeks
MaxRetentionSec=2week

# Do not forward logs to syslog
ForwardToSyslog=no
EOF
systemctl restart systemd-journald
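To confirm journald is now persisting to disk, you can check its disk usage:

journalctl --disk-usage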

10. Upgrade the kernel to 4.4
[run on all nodes]
The stock 3.10.x kernel in CentOS 7.x has bugs that make Docker and Kubernetes unstable, so upgrade to a long-term-support kernel from ELRepo.
I have also uploaded the packages to Baidu Netdisk:
Link: https://pan.baidu.com/s/1kQ48A-St03MzY2BWakad0Q
Code: pwcq

rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
# After installing, check that the kernel menuentry in /boot/grub2/grub.cfg contains an initrd16 line; if it doesn't, install again!
yum --enablerepo=elrepo-kernel install -y kernel-lt
yum -y install kernel-lt-4.4.222-1.el7.elrepo.x86_64.rpm  # if the download fails, use the copy from my Baidu Netdisk
# Check the kernel version
cat /boot/grub2/grub.cfg |grep 4.4
# Boot from the new kernel by default
grub2-set-default 'CentOS Linux (4.4.222-1.el7.elrepo.x86_64) 7 (Core)'
# Reboot
init 6
# Check the running kernel
uname -r

11. Prerequisites for enabling IPVS in kube-proxy
[run on all nodes]

modprobe br_netfilter

cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4

12. Install Docker on all node machines, following this guide:
https://blog.csdn.net/a13568hki/article/details/107068006

III. Install Kubernetes

1. Issue certificates for etcd

Brief outline of the process:
1) Create a certificate authority (CA)
2) Fill in the CSR form, listing the IPs of the etcd nodes
3) Request the certificates from the CA
Download: https://github.com/cloudflare/cfssl/releases
Baidu Netdisk link: https://pan.baidu.com/s/1ce-H7xktQTwXcr8q2x_xjA
Code: kqu3
Step 1: upload the TLS package and create the CA; the etcd and k8s templates are on my Baidu Netdisk

[root@k8s-master1 TLS]# ll
-rwxr-xr-x 1 root root 16659824 Mar 15 11:03 cfssl_1.6.1_linux_amd64
-rwxr-xr-x 1 root root 13502544 Mar 15 14:47 cfssl-certinfo_1.6.1_linux_amd64
-rwxr-xr-x 1 root root 11029744 Mar 15 14:32 cfssljson_1.6.1_linux_amd64
drwxr-xr-x 2 root root  99 Mar 15 15:21 etcd
drwxr-xr-x 2 root root 125 Mar 15 15:21 k8s
[root@k8s-master1 TLS]# mv cfssl_1.6.1_linux_amd64 cfssl
[root@k8s-master1 TLS]# mv cfssl-certinfo_1.6.1_linux_amd64 cfssl-certinfo
[root@k8s-master1 TLS]# mv cfssljson_1.6.1_linux_amd64 cfssljson
[root@k8s-master1 TLS]# mv cfssl* /usr/local/bin/
[root@k8s-master1 etcd]# chmod +x /usr/local/bin/cfssl*
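A quick sanity check that the cfssl binaries are on the PATH and executable:

[root@k8s-master1 etcd]# cfssl version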

Step 2: fill in the CSR form, listing the IPs of the etcd nodes

Edit the IPs under "hosts"; these are the addresses of the nodes running etcd and the masters:

[root@k8s-master1 TLS]# cat etcd/server-csr.json
{
    "CN": "etcd",
    "hosts": [
        "10.0.19.127",
        "10.0.19.128",
        "10.0.19.129",
        "10.0.19.130"
        ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing"
        }
    ]
}

Step 3: request the certificates from the CA

[root@k8s-master1 TLS]# cd etcd/
[root@k8s-master1 etcd]# cat generate_etcd_cert.sh
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -  # the key command: create the self-signed CA
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server  # issue the server certificate
[root@k8s-master1 etcd]# chmod +x generate_etcd_cert.sh
[root@k8s-master1 etcd]# ./generate_etcd_cert.sh
[root@k8s-master1 etcd]# ls *pem
ca-key.pem  ca.pem  server-key.pem  server.pem
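Optionally, inspect the issued certificate to confirm the etcd node IPs made it into the SANs (openssl ships with CentOS):

[root@k8s-master1 etcd]# openssl x509 -in server.pem -noout -text | grep -A1 'Subject Alternative Name'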
[root@k8s-master1 etcd]# cd

2. Deploy etcd

etcd needs three machines;
install one etcd instance each on master1, node1, and node2.
Download: https://github.com/etcd-io/etcd/releases/tag/v3.3.10
Baidu Netdisk link: https://pan.baidu.com/s/1rb79E2EtgvhQOy-s7nRHPw
Code: wro8

[root@k8s-master1 ~]# ls etcd-v3.3.10-linux-amd64.tar.gz 
etcd-v3.3.10-linux-amd64.tar.gz
[root@k8s-master1 ~]# tar xf etcd-v3.3.10-linux-amd64.tar.gz 
[root@k8s-master1 ~]# ls etcd-v3.3.10-linux-amd64/  # the etcd and etcdctl binaries are what we need
Documentation  etcd  etcdctl  README-etcdctl.md  README.md  READMEv2-etcdctl.md
[root@k8s-master1 ~]# mkdir -p  /opt/etcd/{cfg,ssl,bin}  # create config, cert, and binary directories
[root@k8s-master1 ~]# mv etcd-v3.3.10-linux-amd64/etcd* /opt/etcd/bin/  # copy the binaries
[root@k8s-master1 ~]# cp /root/TLS/etcd/{ca.pem,server.pem,server-key.pem} /opt/etcd/ssl/  # copy the etcd certificates

# Create the config file

cat > /opt/etcd/cfg/etcd.conf   <<EOF
#[Member]
ETCD_NAME="etcd-1"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://10.0.19.127:2380"
ETCD_LISTEN_CLIENT_URLS="https://10.0.19.127:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.0.19.127:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://10.0.19.127:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://10.0.19.127:2380,etcd-2=https://10.0.19.129:2380,etcd-3=https://10.0.19.130:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

# Create the systemd unit

cat > /usr/lib/systemd/system/etcd.service <<'EOF'
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=/opt/etcd/cfg/etcd.conf
ExecStart=/opt/etcd/bin/etcd \
        --name=${ETCD_NAME} \
        --data-dir=${ETCD_DATA_DIR} \
        --listen-peer-urls=${ETCD_LISTEN_PEER_URLS} \
        --listen-client-urls=${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
        --advertise-client-urls=${ETCD_ADVERTISE_CLIENT_URLS} \
        --initial-advertise-peer-urls=${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
        --initial-cluster=${ETCD_INITIAL_CLUSTER} \
        --initial-cluster-token=${ETCD_INITIAL_CLUSTER_TOKEN} \
        --initial-cluster-state=${ETCD_INITIAL_CLUSTER_STATE} \
        --cert-file=/opt/etcd/ssl/server.pem \
        --key-file=/opt/etcd/ssl/server-key.pem \
        --peer-cert-file=/opt/etcd/ssl/server.pem \
        --peer-key-file=/opt/etcd/ssl/server-key.pem \
        --trusted-ca-file=/opt/etcd/ssl/ca.pem \
        --peer-trusted-ca-file=/opt/etcd/ssl/ca.pem
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

Copy the etcd systemd unit and program directory to node1 and node2:

[root@k8s-master1 ~]# scp /usr/lib/systemd/system/etcd.service root@k8s-node1:/usr/lib/systemd/system/
[root@k8s-master1 ~]# scp /usr/lib/systemd/system/etcd.service root@k8s-node2:/usr/lib/systemd/system/
[root@k8s-master1 ~]# scp -r /opt/etcd/ root@k8s-node1:/opt/
[root@k8s-master1 ~]# scp -r /opt/etcd/ root@k8s-node2:/opt/

On node1, edit the etcd config file:

[root@k8s-node1 ~]# vim /opt/etcd/cfg/etcd.conf
#[Member]
ETCD_NAME="etcd-2"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://10.0.19.129:2380"
ETCD_LISTEN_CLIENT_URLS="https://10.0.19.129:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.0.19.129:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://10.0.19.129:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://10.0.19.127:2380,etcd-2=https://10.0.19.129:2380,etcd-3=https://10.0.19.130:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"

On node2, edit the etcd config file:

[root@k8s-node2 ~]# vim /opt/etcd/cfg/etcd.conf
#[Member]
ETCD_NAME="etcd-3"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://10.0.19.130:2380"
ETCD_LISTEN_CLIENT_URLS="https://10.0.19.130:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.0.19.130:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://10.0.19.130:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://10.0.19.127:2380,etcd-2=https://10.0.19.129:2380,etcd-3=https://10.0.19.130:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"

Start the etcd service on each of the three nodes in turn (each member waits for its peers to join):

systemctl start etcd
systemctl enable etcd

Check that the cluster started successfully:

/opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://10.0.19.127:2379,https://10.0.19.129:2379,https://10.0.19.130:2379" cluster-health
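As a further smoke test you can write and read a key (etcdctl in 3.3 defaults to the v2 API, matching the flags above):

/opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://10.0.19.127:2379" set /test ok
/opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://10.0.19.129:2379" get /test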

3. Issue certificates for the API server

[root@k8s-master1 ~]# cat /root/TLS/k8s/server-csr.json 
{
    "CN": "kubernetes",
    "hosts": [
      "10.0.0.1",
      "127.0.0.1",
      "kubernetes",
      "kubernetes.default",
      "kubernetes.default.svc",
      "kubernetes.default.svc.cluster",
      "kubernetes.default.svc.cluster.local",
      "10.0.19.127",
      "10.0.19.128",
      "10.0.19.129",
      "10.0.19.130",
      "10.0.19.131",
      "10.0.19.132",
      "10.0.19.133"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
[root@k8s-master1 ~]# cd /root/TLS/k8s
[root@k8s-master1 k8s]# chmod +x generate_k8s_cert.sh 
[root@k8s-master1 k8s]# ./generate_k8s_cert.sh
[root@k8s-master1 k8s]# cd

The kube-proxy-csr.json certificate is used by kube-proxy only as a client certificate, so its hosts field is empty.

4. Deploy the master components

Download the k8s binary package.
Download: https://github.com/kubernetes/kubernetes/tree/master/CHANGELOG
Baidu Netdisk link: https://pan.baidu.com/s/1yDMQCNz74As7tdbHc1kxjg
Code: av0g
Before the steps below, extract kubernetes-server-linux-amd64.tar.gz

[root@k8s-master1 ~]# tar -zxvf kubernetes-server-linux-amd64.tar.gz
# after extracting, we need the kube-apiserver, kube-controller-manager, and kube-scheduler binaries, plus kubectl for administration
[root@k8s-master1 ~]# mkdir -p /opt/kubernetes/{bin,cfg,logs,ssl}   # create config, cert, binary, and log directories
[root@k8s-master1 ~]# cd kubernetes/server/bin/
[root@k8s-master1 bin]# cp kube-apiserver kube-controller-manager kube-scheduler kubectl /opt/kubernetes/bin/  # copy the binaries
[root@k8s-master1 bin]# cp /root/TLS/k8s/{ca*pem,server.pem,server-key.pem} /opt/kubernetes/ssl/  # copy the certificates

Add configuration files and systemd units for the master components.
Create the apiserver config file; change the IPs to your own:

cat > /opt/kubernetes/cfg/kube-apiserver.conf <<'EOF'
KUBE_APISERVER_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/kubernetes/logs \
--etcd-servers=https://10.0.19.127:2379,https://10.0.19.129:2379,https://10.0.19.130:2379 \
--bind-address=10.0.19.127 \
--secure-port=6443 \
--advertise-address=10.0.19.127 \
--allow-privileged=true \
--service-cluster-ip-range=10.0.0.0/24 \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
--authorization-mode=RBAC,Node \
--enable-bootstrap-token-auth=true \
--token-auth-file=/opt/kubernetes/cfg/token.csv \
--service-node-port-range=30000-32767 \
--kubelet-client-certificate=/opt/kubernetes/ssl/server.pem \
--kubelet-client-key=/opt/kubernetes/ssl/server-key.pem \
--tls-cert-file=/opt/kubernetes/ssl/server.pem  \
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \
--client-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/opt/etcd/ssl/ca.pem \
--etcd-certfile=/opt/etcd/ssl/server.pem \
--etcd-keyfile=/opt/etcd/ssl/server-key.pem \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/opt/kubernetes/logs/k8s-audit.log"
EOF

# Create the kube-apiserver systemd unit

cat > /usr/lib/systemd/system/kube-apiserver.service <<'EOF'
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-apiserver.conf
ExecStart=/opt/kubernetes/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

Create the kube-controller-manager config file:

cat > /opt/kubernetes/cfg/kube-controller-manager.conf <<'EOF'
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/kubernetes/logs \
--leader-elect=true \
--master=127.0.0.1:8080 \
--address=127.0.0.1 \
--allocate-node-cidrs=true \
--cluster-cidr=10.244.0.0/16 \
--service-cluster-ip-range=10.0.0.0/24 \
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem  \
--root-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \
--experimental-cluster-signing-duration=87600h0m0s"
EOF

# Create the kube-controller-manager systemd unit

cat > /usr/lib/systemd/system/kube-controller-manager.service <<'EOF'
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-controller-manager.conf
ExecStart=/opt/kubernetes/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

Create the kube-scheduler config file:

cat > /opt/kubernetes/cfg/kube-scheduler.conf <<'EOF'
KUBE_SCHEDULER_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/kubernetes/logs \
--leader-elect \
--master=127.0.0.1:8080 \
--address=127.0.0.1"
EOF

# Create the kube-scheduler systemd unit

cat > /usr/lib/systemd/system/kube-scheduler.service <<'EOF'
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-scheduler.conf
ExecStart=/opt/kubernetes/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

Start the master components

[root@k8s-master1 cfg]# echo 'c47ffb939f5ca36231d9e3121a252940,kubelet-bootstrap,10001,"system:node-bootstrapper"' > /opt/kubernetes/cfg/token.csv  # write the bootstrap user/token file -- required
systemctl start kube-apiserver
systemctl start kube-scheduler
systemctl start kube-controller-manager
systemctl enable kube-apiserver
systemctl enable kube-scheduler
systemctl enable kube-controller-manager
cp /opt/kubernetes/bin/kubectl /bin/
Check the results:
[root@k8s-master1 cfg]# ps aux |grep kube
[root@k8s-master1 cfg]# ps aux |grep kube |wc -l
4
[root@k8s-master1 bin]# kubectl get cs  # a kubectl version issue makes the STATUS column show <unknown>; with a 1.21 kubectl it displays correctly -- harmless
NAME                 AGE
controller-manager   <unknown>
scheduler            <unknown>
etcd-0               <unknown>
etcd-2               <unknown>
etcd-1               <unknown>

Configure TLS bootstrapping so kubelet client certificates are issued automatically.
Check the bootstrap user (the one written to token.csv), then grant it the node-bootstrapper role:

[root@k8s-master1 bin]# cd
[root@k8s-master1 cfg]#  kubectl create clusterrolebinding kubelet-bootstrap \
	--clusterrole=system:node-bootstrapper \
	--user=kubelet-bootstrap
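Verify the binding exists:

[root@k8s-master1 cfg]# kubectl get clusterrolebinding kubelet-bootstrap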

5. Install the worker nodes

Download: https://github.com/kubernetes/kubernetes/tree/master/CHANGELOG
Baidu Netdisk link: https://pan.baidu.com/s/1J8ADpuCNgb2rsiNhpb7Vvg
Code: 82p3

[root@k8s-node1 ~]# tar -zxvf kubernetes-node-linux-amd64.tar.gz
[root@k8s-node1 ~]# mkdir -p /opt/kubernetes/{bin,cfg,logs,ssl}   # create config, cert, binary, and log directories
[root@k8s-node1 ~]# cd kubernetes
[root@k8s-node1 kubernetes]# cp node/bin/{kubelet,kube-proxy} /opt/kubernetes/bin  # copy the binaries
[root@k8s-node1 kubernetes]# scp -r root@k8s-master1:/root/TLS/k8s/{ca.pem,kube-proxy.pem,kube-proxy-key.pem} /opt/kubernetes/ssl/  # copy the certificates from the master

Create the worker node config files:

cd /opt/kubernetes/cfg
cat > bootstrap.kubeconfig <<'EOF'
apiVersion: v1
clusters:
- cluster:
    certificate-authority: /opt/kubernetes/ssl/ca.pem
    server: https://10.0.19.127:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubelet-bootstrap
  name: default
current-context: default
kind: Config
preferences: {}
users:
- name: kubelet-bootstrap
  user:
    token: c47ffb939f5ca36231d9e3121a252940
EOF
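Note that the token above must match the one written to /opt/kubernetes/cfg/token.csv on the master. If you want to generate your own token instead of reusing the example value, one common way (a sketch) is:

head -c 16 /dev/urandom | od -An -t x | tr -d ' '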
cat > kubelet.conf  <<'EOF'
KUBELET_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/kubernetes/logs \
--hostname-override=k8s-node1 \
--network-plugin=cni \
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \
--config=/opt/kubernetes/cfg/kubelet-config.yml \
--cert-dir=/opt/kubernetes/ssl \
--pod-infra-container-image=lizhenliang/pause-amd64:3.0"
EOF
cat > kubelet-config.yml  <<'EOF'
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
- 10.0.0.2
clusterDomain: cluster.local 
failSwapOn: false
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /opt/kubernetes/ssl/ca.pem 
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
maxOpenFiles: 1000000
maxPods: 110
EOF
cat > kube-proxy.conf  <<'EOF'
KUBE_PROXY_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/kubernetes/logs \
--config=/opt/kubernetes/cfg/kube-proxy-config.yml"
EOF
cat > kube-proxy-config.yml  <<'EOF'
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
address: 0.0.0.0
metricsBindAddress: 0.0.0.0:10249
clientConnection:
  kubeconfig: /opt/kubernetes/cfg/kube-proxy.kubeconfig
hostnameOverride: k8s-node1
clusterCIDR: 10.0.0.0/24
mode: ipvs
ipvs:
  scheduler: "rr"
iptables:
  masqueradeAll: true
EOF
cat > kube-proxy.kubeconfig  <<'EOF'
apiVersion: v1
clusters:
- cluster:
    certificate-authority: /opt/kubernetes/ssl/ca.pem
    server: https://10.0.19.127:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kube-proxy
  name: default
current-context: default
kind: Config
preferences: {}
users:
- name: kube-proxy
  user:
    client-certificate: /opt/kubernetes/ssl/kube-proxy.pem
    client-key: /opt/kubernetes/ssl/kube-proxy-key.pem
EOF
Adjust the config files (4 changes):
	[root@k8s-node1 ~]# vim /opt/kubernetes/cfg/kube-proxy.kubeconfig
	Change one line: server: https://10.0.19.127:6443
	This points at the master's IP address.

	[root@k8s-node1 ~]# vi /opt/kubernetes/cfg/bootstrap.kubeconfig
	Change one line: server: https://10.0.19.127:6443
	This points at the master's IP address.

	[root@k8s-node1 ~]# vim /opt/kubernetes/cfg/kube-proxy-config.yml
	Change one line: hostnameOverride: k8s-node1
	This is the current host's hostname.

	[root@k8s-node1 ~]# vim /opt/kubernetes/cfg/kubelet.conf
	Change one line: --hostname-override=k8s-node1 \
	This is the current host's hostname.

Create the worker node systemd units:

cat > /usr/lib/systemd/system/kubelet.service  <<'EOF'
[Unit]
Description=Kubernetes Kubelet
After=docker.service

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kubelet.conf
ExecStart=/opt/kubernetes/bin/kubelet $KUBELET_OPTS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
cat > /usr/lib/systemd/system/kube-proxy.service   <<'EOF'
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-proxy.conf
ExecStart=/opt/kubernetes/bin/kube-proxy $KUBE_PROXY_OPTS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

Start the kubelet and kube-proxy services:

systemctl start kube-proxy
systemctl start kubelet
systemctl enable kubelet
systemctl enable kube-proxy

6. Approve certificates for the worker nodes from the master (run on the master)

[root@k8s-master1 k8s]# kubectl get csr
NAME                                                   AGE    REQUESTOR           CONDITION
node-csr-Uu61q1J1nAJ0AprrHc9rcSPVU0qSsD-Z4qDdapDvsWo   6m6s   kubelet-bootstrap   Pending

[root@k8s-master1 k8s]# kubectl certificate approve node-csr-Uu61q1J1nAJ0AprrHc9rcSPVU0qSsD-Z4qDdapDvsWo
		Note: the CSR name must be your own; don't copy mine.
[root@k8s-master1 bin]# kubectl get node
NAME        STATUS     ROLES    AGE   VERSION
k8s-node1   NotReady   <none>   6s    v1.16.10
k8s-node2   NotReady   <none>   15s   v1.16.10

7. Install the network plugin

1) Confirm CNI is enabled

[root@k8s-node1 ~]# grep "cni" /opt/kubernetes/cfg/kubelet.conf
--network-plugin=cni \

2) Install the CNI plugins (note: run this step on every node that joins the cluster)
Baidu Netdisk link: https://pan.baidu.com/s/1RFHmQfi0rky1yG8hmBpLZQ
Code: d7no
On node1:

[root@k8s-node1 ~]#  mkdir -pv /opt/cni/bin /etc/cni/net.d
[root@k8s-node1 ~]# wget -c https://github.com/containernetworking/plugins/releases/download/v0.8.2/cni-plugins-linux-amd64-v0.8.2.tgz
[root@k8s-node1 ~]# tar xf cni-plugins-linux-amd64-v0.8.2.tgz -C /opt/cni/bin/

On node2:

[root@k8s-node2 ~]#  mkdir -pv /opt/cni/bin /etc/cni/net.d
[root@k8s-node2 ~]# wget -c https://github.com/containernetworking/plugins/releases/download/v0.8.2/cni-plugins-linux-amd64-v0.8.2.tgz
[root@k8s-node2 ~]# tar xf cni-plugins-linux-amd64-v0.8.2.tgz -C /opt/cni/bin/

3) On the master, apply the YAML manifest to install and start the network plugin on the worker nodes.
Image archive for the network plugin (for offline installs; every worker needs the image locally, or set up an image registry; with internet access the YAML pulls it directly):
https://pan.baidu.com/s/1S4ux43CrHVcln5yFZ9ewEQ
Code: vq7b

YAML file link: https://pan.baidu.com/s/1aU_Cra-8Ycwcy9OgatexNw
Code: sy8e

[root@k8s-master1 YAML]# kubectl apply -f kube-flannel.yaml
		Note:
			this step depends on network speed and may take 5-10 minutes;
			a slow connection can cause timeouts.

4) Authorize the apiserver to access the kubelet

YAML file link: https://pan.baidu.com/s/1KKy7lXOBinWnqZjUwfUI4g
Code: mmqc

[root@k8s-master1 YAML]# kubectl apply -f apiserver-to-kubelet-rbac.yaml
# verify the RBAC objects
[root@k8s-master1 ~]#  kubectl -n kube-system get clusterrole|grep system:kube-apiserver-to-kubelet
system:kube-apiserver-to-kubelet                                       17h
[root@k8s-master1 ~]#  kubectl -n kube-system get clusterrolebinding|grep system:kube-apiserver
system:kube-apiserver                                  17h
 # if the commands above return results, the apiserver authorization succeeded.
[root@k8s-master1 YAML]# kubectl get pods -n kube-system
NAME                          READY   STATUS              RESTARTS   AGE
kube-flannel-ds-amd64-6h5dg   1/1     Running             0          2m29s
kube-flannel-ds-amd64-cgbqj   1/1     Running             0          2m29s

Check the worker nodes' status:
[root@k8s-master1 YAML]# kubectl get nodes
NAME        STATUS     ROLES    AGE     VERSION
k8s-node1   Ready      <none>   5h33m   v1.16.0
k8s-node2   Ready      <none>   19m     v1.16.0

Checking the logs revealed that the iptables version was too old (the CentOS 7.3 build is outdated).
Fix:
https://blog.csdn.net/a13568hki/article/details/123571296

8. Install the DNS component

Image archive for the DNS component (for offline installs; every worker needs the image locally, or set up an image registry; with internet access the YAML pulls it directly): https://pan.baidu.com/s/1wtrkAg9vH7RDrtlNjXrylg
Code: bkst

YAML file link: https://pan.baidu.com/s/1z2DdX1q1KzSE8O2lbs5AUQ
Code: s39h
Name resolution in k8s is implemented by CoreDNS.

[root@k8s-master1 YAML]# kubectl apply -f coredns.yaml 
[root@k8s-master1 YAML]# kubectl get pods -n kube-system | grep coredns
coredns-6d8cfdd59d-x7k6t      1/1     Running   2          2d1h

Test whether Service names resolve through DNS:

[root@k8s-master1 ~]# kubectl get svc
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.0.0.1     <none>        443/TCP   36d
[root@k8s-master1 ~]# kubectl run -it --rm dns-test --image=busybox:1.28.4 sh 
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
If you don't see a command prompt, try pressing enter.
/ # nslookup kubernetes
Server:    10.0.0.2
Address 1: 10.0.0.2 kube-dns.kube-system.svc.cluster.local

Name:      kubernetes
Address 1: 10.0.0.1 kubernetes.default.svc.cluster.local
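From the same test pod, the fully qualified Service name should resolve too (command shown; output follows the same pattern as above):

/ # nslookup kubernetes.default.svc.cluster.local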

9. Manage k8s remotely

By default, k8s can only be managed from the master node.

1) Copy the admin tool to the worker node [run on master]

[root@k8s-master1 ~]# scp /bin/kubectl root@k8s-node2:/bin

2) Generate an admin certificate [run on master]

[root@k8s-master1 ~]# cd /root/TLS/k8s/
[root@k8s-master1 k8s]# vim admin-csr.json
{
  "CN": "admin",
  "hosts": [],
  "key": {
	"algo": "rsa",
	"size": 2048
  },
  "names": [
	{
	  "C": "CN",
	  "L": "BeiJing",
	  "ST": "BeiJing",
	  "O": "system:masters",
	  "OU": "System"
	}
  ]
}

Issue the admin certificate:

[root@k8s-master1 k8s]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin

3) Create a kubeconfig file [run on master]

Set the cluster parameters:

[root@k8s-master1 k8s]# kubectl config set-cluster kubernetes \
   --server=https://10.0.19.127:6443 \
   --certificate-authority=ca.pem \
   --embed-certs=true \
   --kubeconfig=config

Set the client credentials:

[root@k8s-master1 k8s]# kubectl config set-credentials cluster-admin \
   --certificate-authority=ca.pem \
  --embed-certs=true \
   --client-key=admin-key.pem \
   --client-certificate=admin.pem \
   --kubeconfig=config

Set the context:

[root@k8s-master1 k8s]# kubectl config set-context default \
   --cluster=kubernetes \
   --user=cluster-admin \
   --kubeconfig=config

[root@k8s-master1 k8s]# kubectl config use-context default --kubeconfig=config
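You can sanity-check the resulting file before shipping it (embedded certificate data is masked in the output):

[root@k8s-master1 k8s]# kubectl config view --kubeconfig=config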

4) Send the generated config file to the worker node [run on master]

[root@k8s-master1 k8s]# scp config root@k8s-node2:/root/

5) On the worker node, run kubectl commands using the config file [run on worker]

[root@k8s-node2 ~]# kubectl get nodes --kubeconfig=/root/config
NAME        STATUS   ROLES    AGE    VERSION
k8s-node1   Ready    <none>   2d4h   v1.16.0
k8s-node2   Ready    <none>   47h    v1.16.0


[root@k8s-node2 ~]# mkdir ~/.kube
[root@k8s-node2 ~]# mv /root/config /root/.kube/
[root@k8s-node2 ~]# kubectl get nodes
NAME        STATUS   ROLES    AGE    VERSION
k8s-node1   Ready    <none>   2d4h   v1.16.0
k8s-node2   Ready    <none>   47h    v1.16.0

IV. High-Availability Deployment

1. Install the k8s components on master2

For simplicity, copy the already-configured k8s components from master1:

[root@k8s-master1 ~]# scp -r /opt/kubernetes root@k8s-master2:/opt/
[root@k8s-master1 ~]# cd /usr/lib/systemd/system
[root@k8s-master1 system]# scp kube-apiserver.service  kube-controller-manager.service  kube-scheduler.service root@k8s-master2:/usr/lib/systemd/system
[root@k8s-master1 system]# scp /bin/kubectl root@k8s-master2:/bin/

On master2, edit the apiserver config file:

[root@k8s-master2 ~]# vi /opt/kubernetes/cfg/kube-apiserver.conf

# change two lines to the IP this node itself listens on (master2 is 10.0.19.128):
--bind-address=10.0.19.128 \
--advertise-address=10.0.19.128 \

Now start the services on master2:

[root@k8s-master2 ~]# systemctl daemon-reload
[root@k8s-master2 ~]# systemctl restart kube-apiserver
[root@k8s-master2 ~]# systemctl restart kube-controller-manager
[root@k8s-master2 ~]# systemctl restart kube-scheduler
[root@k8s-master2 ~]# systemctl enable kube-apiserver
[root@k8s-master2 ~]# systemctl enable kube-controller-manager
[root@k8s-master2 ~]# systemctl enable kube-scheduler
# check that everything started
[root@k8s-master2 ssl]# ps aux | grep kube
[root@k8s-master2 ssl]# kubectl get nodes

2. Deploy the load balancers

Install nginx and keepalived on both loadbalancer nodes (master and backup).
nginx reverse-proxies the kube-apiserver services on the two masters.
keepalived runs a health check on nginx; if nginx dies on one node, the VIP 10.0.19.133 floats to the other node.

1. Install nginx and keepalived

yum install -y nginx
yum install -y keepalived
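Depending on your repo, the stream module may be packaged separately from nginx itself (the EPEL build loads it from its own package); if `nginx -t` later complains about the `stream` directive, install it too (an assumption about your package layout):

yum install -y nginx-mod-stream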

2. Edit the nginx configuration

vim /etc/nginx/nginx.conf
# For more information on configuration, see:
#   * Official English Documentation: http://nginx.org/en/docs/
#   * Official Russian Documentation: http://nginx.org/ru/docs/

user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;

# Load dynamic modules. See /usr/share/doc/nginx/README.dynamic.
include /usr/share/nginx/modules/*.conf;

events {
    worker_connections 1024;
}
stream {

    log_format  main  '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';

    access_log  /var/log/nginx/k8s-access.log  main;

    upstream k8s-apiserver {
                server 10.0.19.127:6443;
                server 10.0.19.128:6443;
            }
    
    server {
       listen 6443;
       proxy_pass k8s-apiserver;
    }
}


http {
    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';

    access_log  /var/log/nginx/access.log  main;

    sendfile            on;
    tcp_nopush          on;
    tcp_nodelay         on;
    keepalive_timeout   65;
    types_hash_max_size 2048;

    include             /etc/nginx/mime.types;
    default_type        application/octet-stream;

    include /etc/nginx/conf.d/*.conf;

    server {
        listen       80 default_server;
        listen       [::]:80 default_server;
        server_name  _;
        root         /usr/share/nginx/html;

        # Load configuration files for the default server block.
        include /etc/nginx/default.d/*.conf;

        location / {
        }

        error_page 404 /404.html;
            location = /40x.html {
        }

        error_page 500 502 503 504 /50x.html;
            location = /50x.html {
        }
    }
}
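Validate the configuration before starting nginx:

nginx -t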

3. Edit the keepalived MASTER configuration

[root@k8s-loadbalancer1 ~]# cat /etc/keepalived/keepalived.conf 
! Configuration File for keepalived

global_defs {
   #notification_email {
   #  acassen@firewall.loc
   #  failover@firewall.loc
   # sysadmin@firewall.loc
   #}
   #notification_email_from Alexandre.Cassen@firewall.loc
   #smtp_server 192.168.200.1
   #smtp_connect_timeout 30
   router_id bogon
   #vrrp_skip_check_adv_addr
   #vrrp_strict
   #vrrp_garp_interval 0
   #vrrp_gna_interval 0
}

# script that checks whether nginx is alive
vrrp_script chk_nginx {
        script "/etc/keepalived/nginx_check.sh"  # script path
        interval 2
        weight -20
}

vrrp_instance VI_1 {
    state MASTER
    interface ens33 # network interface; find yours with `ip a`
    virtual_router_id 51
    mcast_src_ip 10.0.19.131
    priority 100
    #nopreempt
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    track_script {
        chk_nginx
    }
  
    virtual_ipaddress {
        10.0.19.133
    }
}

4. Edit the keepalived BACKUP configuration

[root@k8s-loadbalancer2 ~]# cat /etc/keepalived/keepalived.conf 
! Configuration File for keepalived

global_defs {
   #notification_email {
   #  acassen@firewall.loc
   #  failover@firewall.loc
   # sysadmin@firewall.loc
   #}
   #notification_email_from Alexandre.Cassen@firewall.loc
   #smtp_server 192.168.200.1
   #smtp_connect_timeout 30
   router_id bogon
   #vrrp_skip_check_adv_addr
   #vrrp_strict
   #vrrp_garp_interval 0
   #vrrp_gna_interval 0
}

vrrp_script chk_nginx {
        script "/etc/keepalived/nginx_check.sh"
        interval 2
        weight -20
}

vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 51
    mcast_src_ip 10.0.19.132
    priority 90 # must be lower than the MASTER's priority
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    track_script {
        chk_nginx
    }
    virtual_ipaddress {
        10.0.19.133
    }
}

5. Health-check script nginx_check.sh

Both loadbalancer nodes need this script:

[root@k8s-loadbalancer1 ~]# cat /etc/keepalived/nginx_check.sh 
#!/bin/bash
# If no nginx process is running, kill keepalived so the VIP fails over.
n=`ps -C nginx --no-heading|wc -l`
echo $n
if [ $n -eq "0" ]; then
        killall keepalived
fi
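Make the script executable on both nodes, or keepalived cannot run it:

chmod +x /etc/keepalived/nginx_check.sh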

6. Start and enable the services

systemctl start nginx keepalived;systemctl enable nginx keepalived;systemctl status nginx keepalived

Verify the failover (screenshots omitted).
While nginx on loadbalancer1 is alive, the VIP sits on that node.
When the nginx process on loadbalancer1 disappears, the VIP floats to loadbalancer2.
When nginx is started again on loadbalancer1, the VIP floats back (preemptive mode).

7. Point every node at the VIP

On each node, change the kube-apiserver address from 10.0.19.127:6443 to 10.0.19.133:6443 (the VIP):

[root@k8s-node1 ~]# cd /opt/kubernetes/cfg/
[root@k8s-node1 cfg]# sed -i 's/10.0.19.127/10.0.19.133/g' *
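Confirm the substitution took effect:

[root@k8s-node1 cfg]# grep -rn "10.0.19.133" /opt/kubernetes/cfg/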

Restart kubelet and kube-proxy on the nodes:

[root@k8s-node2 cfg]# systemctl restart kube-proxy
[root@k8s-node2 cfg]# systemctl restart kubelet

8. Verify kube-apiserver high availability

curl -k --header "Authorization: Bearer c47ffb939f5ca36231d9e3121a252940" https://10.0.19.133:6443/version

(Screenshots omitted.) First check a node's logs, then stop one master and check the node's logs again; finally query the node status.

No errors; the failover succeeded.
