1. Base environment preparation

// Upgrade the server kernel
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org 
yum  -y install https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm
yum -y --disablerepo="*" --enablerepo="elrepo-kernel" list available 
yum --enablerepo=elrepo-kernel install kernel-ml -y  

// List the kernel menu entries available in GRUB
cat /boot/grub2/grub.cfg |grep menuentry 

// Check the current default boot entry
grub2-editenv list 

// Set the newly installed kernel as the default boot entry
grub2-set-default 'CentOS Linux (5.15.6-1.el7.elrepo.x86_64) 7 (Core)' 

// Reboot the server
reboot now

// Check the hostname, OS and kernel version
hostnamectl

// Set the timezone to Asia/Shanghai
timedatectl set-timezone Asia/Shanghai

// Disable swap and SELinux
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config
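
// Optional check: swap usage should show 0 and getenforce should print Permissive
free -h | grep -i swap
getenforce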

// Set the firewalld default zone to trusted
firewall-cmd --set-default-zone=trusted
firewall-cmd --reload

// Configure kernel parameters so bridged IPv4 traffic is passed to the iptables chains
echo br_netfilter >> /etc/modules-load.d/k8s.conf
cat > /etc/sysctl.d/k8s.conf<<EOF 
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
EOF 

// Load the br_netfilter module
modprobe br_netfilter

// Check that the module is loaded
lsmod | grep br_netfilter

// Apply the sysctl settings
sysctl --system
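
// Optional check: confirm the new kernel parameters are active
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward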

// Update the hosts file
cat >>/etc/hosts<<EOF
10.0.0.100 master-01
10.0.0.101 master-02
10.0.0.102 master-03
10.0.0.103 node-01
10.0.0.104 node-02
10.0.0.105 node-03
10.0.0.106 node-04
10.0.0.107 node-05
10.0.0.108 node-06
10.0.0.109 keepalived-01
10.0.0.110 keepalived-02
EOF
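
// Optional check: confirm the new entries resolve
getent hosts master-01 node-01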

// Remove old Docker versions
 sudo yum remove docker \
                  docker-client \
                  docker-client-latest \
                  docker-common \
                  docker-latest \
                  docker-latest-logrotate \
                  docker-logrotate \
                  docker-engine

// Add the Docker yum repository
sudo yum install -y yum-utils
sudo yum-config-manager \
    --add-repo \
    https://download.docker.com/linux/centos/docker-ce.repo

// List available Docker versions
yum list docker-ce --showduplicates | sort -r

// Install a specific version of Docker Engine
yum install -y docker-ce-20.10.7-3.el7 docker-ce-cli-20.10.7-3.el7 containerd.io
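
// The package install does not start the daemon; enable and start Docker before continuing
systemctl enable docker && systemctl start docker
docker --version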

// Add the Aliyun Kubernetes yum repository
cat > /etc/yum.repos.d/kubernetes.repo<<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

// Install the Kubernetes components (the version must match kubernetesVersion in the kubeadm config below, v1.21.2)
yum install -y kubelet-1.21.2 kubeadm-1.21.2 kubectl-1.21.2
systemctl enable kubelet && systemctl start kubelet
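
// Note: kubelet will restart in a crash loop until "kubeadm init/join" runs; that is expected at this stage
// Confirm the installed versions
kubeadm version -o short
kubelet --version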

2. Load balancing: haproxy + keepalived

// Install keepalived and haproxy on both load balancer servers
yum -y install keepalived haproxy  
systemctl enable keepalived.service && systemctl enable haproxy.service
Keepalived configuration files
MASTER node configuration:
cat > /etc/keepalived/keepalived.conf<<EOF
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
}
vrrp_script check_apiserver {
  script "/etc/keepalived/check_apiserver.sh"
  interval 3
  weight -2
  fall 10
  rise 2
}

vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 61
    priority 100
    authentication {
        auth_type PASS
        auth_pass 0WedWoc
    }
    virtual_ipaddress {
        10.0.0.111/24
    }
    track_script {
        check_apiserver
    }
}
EOF
BACKUP node configuration:
cat > /etc/keepalived/keepalived.conf<<EOF
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
}
vrrp_script check_apiserver {
  script "/etc/keepalived/check_apiserver.sh"
  interval 3
  weight -2
  fall 10
  rise 2
}

vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 61
    priority 90
    authentication {
        auth_type PASS
        auth_pass 0WedWoc
    }
    virtual_ipaddress {
        10.0.0.111/24
    }
    track_script {
        check_apiserver
    }
}
EOF
Health-check script:
cat >/etc/keepalived/check_apiserver.sh<<'EOF'
#!/bin/sh
# VIP and apiserver port fronted by keepalived/haproxy
APISERVER_VIP=10.0.0.111
APISERVER_DEST_PORT=6443
errorExit() {
    echo "*** $*" 1>&2
    exit 1
}
curl --silent --max-time 2 --insecure https://localhost:${APISERVER_DEST_PORT}/ -o /dev/null || errorExit "Error GET https://localhost:${APISERVER_DEST_PORT}/"
if ip addr | grep -q ${APISERVER_VIP}; then
    curl --silent --max-time 2 --insecure https://${APISERVER_VIP}:${APISERVER_DEST_PORT}/ -o /dev/null || errorExit "Error GET https://${APISERVER_VIP}:${APISERVER_DEST_PORT}/"
fi
EOF

// Make the script executable
chmod +x /etc/keepalived/check_apiserver.sh
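
// Optional: run the script by hand; it exits non-zero until an apiserver answers, which is expected at this stage
bash /etc/keepalived/check_apiserver.sh; echo "exit code: $?"
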
Edit the haproxy configuration; the file is identical on both hosts:
cat >/etc/haproxy/haproxy.cfg<<EOF
#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    log /dev/log local0
    log /dev/log local1 notice
    daemon

#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 1
    timeout http-request    10s
    timeout queue           20s
    timeout connect         5s
    timeout client          20s
    timeout server          20s
    timeout http-keep-alive 10s
    timeout check           10s

#---------------------------------------------------------------------
# apiserver frontend which proxys to the masters
#---------------------------------------------------------------------
frontend apiserver
    bind *:6443
    mode tcp
    option tcplog
    default_backend apiserver
#---------------------------------------------------------------------
# round robin balancing for apiserver
#---------------------------------------------------------------------
backend apiserver
    option httpchk GET /healthz
    http-check expect status 200
    mode tcp
    option ssl-hello-chk
    balance     roundrobin
    server master-01  10.0.0.100:6443 check maxconn 2000
    server master-02  10.0.0.101:6443 check maxconn 2000
    server master-03  10.0.0.102:6443 check maxconn 2000
EOF

// Start and enable keepalived and haproxy
systemctl start keepalived.service
systemctl start haproxy.service
systemctl enable keepalived.service
systemctl enable haproxy.service
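
// Optional check: the MASTER node should hold the VIP (on eth0, as in the keepalived config) and haproxy should listen on 6443
ip addr show eth0 | grep 10.0.0.111
ss -lntp | grep 6443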

3. Deploy the flannel network plugin

// Apply this manifest after the control plane is up (section 5); if the wget fails, just retry a couple of times
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl create -f kube-flannel.yml
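
// Optional check: the flannel DaemonSet pods should be Running on every node
// (depending on the manifest version they live in kube-system or kube-flannel)
kubectl get pods -A -o wide | grep flannel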

4. Deploy the etcd cluster

Configure kubelet to be a service manager for etcd
mkdir -p /etc/systemd/system/kubelet.service.d/

// Replace "systemd" below with the cgroup driver used by your container runtime (the kubelet default is "cgroupfs")
// Reference: https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver/
// Reference: https://kubernetes.io/docs/setup/production-environment/container-runtimes/#docker

cat >/etc/systemd/system/kubelet.service.d/20-etcd-service-manager.conf<<EOF
[Service]
ExecStart=
# Replace "systemd" below with the cgroup driver used by your container runtime;
# the kubelet default is "cgroupfs".
ExecStart=/usr/bin/kubelet --address=127.0.0.1 --pod-manifest-path=/etc/kubernetes/manifests --cgroup-driver=systemd
Restart=always
EOF

// Reload systemd, restart kubelet, and check its status
systemctl daemon-reload && systemctl restart kubelet && systemctl status kubelet

// Edit the Docker daemon config so its cgroup driver matches the kubelet (systemd)
vim /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "storage-driver": "overlay2",
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "1000m",
    "max-file": "50"
  }
}

// Restart Docker and check its status
systemctl daemon-reload && systemctl restart docker.service  && systemctl status docker.service
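
// Optional check: Docker should now report the systemd cgroup driver, matching the kubelet flag above
docker info 2>/dev/null | grep -i "cgroup driver"
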
Create configuration files for kubeadm
# Use the following script to generate a kubeadm config file on every host that will run an etcd member
# Replace HOST0, HOST1 and HOST2 with IPs or resolvable hostnames
export HOST0=10.0.0.100
export HOST1=10.0.0.101
export HOST2=10.0.0.102

// Create temporary directories to store the files that will be distributed to the other hosts
mkdir -p /tmp/${HOST0}/ /tmp/${HOST1}/ /tmp/${HOST2}/
ETCDHOSTS=(${HOST0} ${HOST1} ${HOST2})
NAMES=("infra0" "infra1" "infra2")

for i in "${!ETCDHOSTS[@]}"; do
HOST=${ETCDHOSTS[$i]}
NAME=${NAMES[$i]}
cat << EOF > /tmp/${HOST}/kubeadmcfg.yaml
apiVersion: "kubeadm.k8s.io/v1beta2"
kind: ClusterConfiguration
etcd:
    local:
        serverCertSANs:
        - "${HOST}"
        peerCertSANs:
        - "${HOST}"
        extraArgs:
            initial-cluster: infra0=https://${ETCDHOSTS[0]}:2380,infra1=https://${ETCDHOSTS[1]}:2380,infra2=https://${ETCDHOSTS[2]}:2380
            initial-cluster-state: new
            name: ${NAME}
            listen-peer-urls: https://${HOST}:2380
            listen-client-urls: https://${HOST}:2379
            advertise-client-urls: https://${HOST}:2379
            initial-advertise-peer-urls: https://${HOST}:2380
EOF
done
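
// Optional: spot-check one of the generated files before distributing them
cat /tmp/${HOST0}/kubeadmcfg.yaml
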
Certificate generation
// If you already have a CA, the only action needed is to copy its crt and key to /etc/kubernetes/pki/etcd/ca.crt and /etc/kubernetes/pki/etcd/ca.key. After copying them, continue with "Create certificates for each member".
// If you do not have a CA yet, run the following command on $HOST0 (where you generated the kubeadm config files).

// Create the etcd CA
kubeadm init phase certs etcd-ca

// The following two files are generated
/etc/kubernetes/pki/etcd/ca.crt
/etc/kubernetes/pki/etcd/ca.key

// Check the certificate validity period
for item in `find /etc/kubernetes/pki -maxdepth 2 -name "*.crt"`;do openssl x509 -in $item -text -noout| grep Not;echo ======================$item===============;done
           Not Before: Jul 19 03:26:55 2021 GMT
            Not After : Jul 17 03:26:55 2031 GMT
======================/etc/kubernetes/pki/etcd/ca.crt===============
Create certificates for each member
// Replace HOST0, HOST1 and HOST2 with IPs or resolvable hostnames
export HOST0=10.0.0.100
export HOST1=10.0.0.101
export HOST2=10.0.0.102
kubeadm init phase certs etcd-server --config=/tmp/${HOST2}/kubeadmcfg.yaml
kubeadm init phase certs etcd-peer --config=/tmp/${HOST2}/kubeadmcfg.yaml
kubeadm init phase certs etcd-healthcheck-client --config=/tmp/${HOST2}/kubeadmcfg.yaml
kubeadm init phase certs apiserver-etcd-client --config=/tmp/${HOST2}/kubeadmcfg.yaml
cp -R /etc/kubernetes/pki /tmp/${HOST2}/

// Clean up certificates that must not be reused
find /etc/kubernetes/pki -not -name ca.crt -not -name ca.key -type f -delete

kubeadm init phase certs etcd-server --config=/tmp/${HOST1}/kubeadmcfg.yaml
kubeadm init phase certs etcd-peer --config=/tmp/${HOST1}/kubeadmcfg.yaml
kubeadm init phase certs etcd-healthcheck-client --config=/tmp/${HOST1}/kubeadmcfg.yaml
kubeadm init phase certs apiserver-etcd-client --config=/tmp/${HOST1}/kubeadmcfg.yaml
cp -R /etc/kubernetes/pki /tmp/${HOST1}/
find /etc/kubernetes/pki -not -name ca.crt -not -name ca.key -type f -delete

kubeadm init phase certs etcd-server --config=/tmp/${HOST0}/kubeadmcfg.yaml
kubeadm init phase certs etcd-peer --config=/tmp/${HOST0}/kubeadmcfg.yaml
kubeadm init phase certs etcd-healthcheck-client --config=/tmp/${HOST0}/kubeadmcfg.yaml
kubeadm init phase certs apiserver-etcd-client --config=/tmp/${HOST0}/kubeadmcfg.yaml

// No need to move the certs because they are for HOST0
// Clean up certs that should not be copied off this host
find /tmp/${HOST2} -name ca.key -type f -delete
find /tmp/${HOST1} -name ca.key -type f -delete
Copy the certificates and kubeadm configs to host1 and host2
scp -r /tmp/10.0.0.101/*  root@10.0.0.101:/etc/kubernetes/
scp -r /tmp/10.0.0.102/*  root@10.0.0.102:/etc/kubernetes/
Make sure all of the expected files exist
// The complete list of required files on $HOST0 is:
/tmp/${HOST0}
└── kubeadmcfg.yaml
---
/etc/kubernetes/pki
├── apiserver-etcd-client.crt
├── apiserver-etcd-client.key
└── etcd
    ├── ca.crt
    ├── ca.key
    ├── healthcheck-client.crt
    ├── healthcheck-client.key
    ├── peer.crt
    ├── peer.key
    ├── server.crt
    └── server.key

// On $HOST1:
/etc/kubernetes/
├── kubeadmcfg.yaml
├── manifests
│   └── etcd.yaml
└── pki
    ├── apiserver-etcd-client.crt
    ├── apiserver-etcd-client.key
    └── etcd
        ├── ca.crt
        ├── healthcheck-client.crt
        ├── healthcheck-client.key
        ├── peer.crt
        ├── peer.key
        ├── server.crt
        └── server.key
        
// On $HOST2:
/etc/kubernetes/
├── kubeadmcfg.yaml
├── manifests
│   └── etcd.yaml
└── pki
    ├── apiserver-etcd-client.crt
    ├── apiserver-etcd-client.key
    └── etcd
        ├── ca.crt
        ├── healthcheck-client.crt
        ├── healthcheck-client.key
        ├── peer.crt
        ├── peer.key
        ├── server.crt
        └── server.key
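
// Optional: if the tree package is installed, compare the actual layout against the listings above
tree /tmp/${HOST0} /etc/kubernetes/pki    # on $HOST0
tree /etc/kubernetes                      # on $HOST1 and $HOST2
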
If image pulls from k8s.gcr.io fail (the errors below in /var/log/messages include the required versions), first docker pull the images from a domestic mirror and retag them with docker tag <original-image>:<version> <new-image>:<version>
tail -f /var/log/messages

failed pulling image \\\"k8s.gcr.io/pause:3.4.1\\\": Error response from daemon: Get https://k8s.gcr.io/v2/
failed to \"StartContainer\" for \"etcd\" with ImagePullBackOff: \"Back-off pulling image \\\"k8s.gcr.io/etcd:3.4.13-0\\\

docker pull  registry.aliyuncs.com/google_containers/pause:3.4.1
docker tag registry.aliyuncs.com/google_containers/pause:3.4.1 k8s.gcr.io/pause:3.4.1

docker pull  registry.aliyuncs.com/google_containers/etcd:3.4.13-0
docker tag registry.aliyuncs.com/google_containers/etcd:3.4.13-0 k8s.gcr.io/etcd:3.4.13-0

// Check the running containers
docker ps
CONTAINER ID   IMAGE                    COMMAND                  CREATED              STATUS              PORTS     NAMES
3b6323d8f3e1   0369cf4303ff             "etcd --advertise-cl…"   About a minute ago   Up About a minute             k8s_etcd_etcd-etcd-01_kube-system_ff28b8af2ac35182784c4dd4c3c2d9f7_0
f48383b964b1   k8s.gcr.io/pause:3.4.1   "/pause"                 10 minutes ago       Up 10 minutes                 k8s_POD_etcd-etcd-01_kube-system_ff28b8af2ac35182784c4dd4c3c2d9f7_0
Create the static Pod manifests
// The certificates and configs are in place; run the kubeadm command on each host to generate the static manifest used by etcd
[root@etcd-01 ~]# kubeadm init phase etcd local --config=/tmp/10.0.0.100/kubeadmcfg.yaml
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"

[root@etcd-02 ~]# kubeadm init phase etcd local --config=/etc/kubernetes/kubeadmcfg.yaml
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"

[root@etcd-03 ~]# kubeadm init phase etcd local --config=/etc/kubernetes/kubeadmcfg.yaml
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
Optional: check the cluster health
docker run --rm -it \
--net host \
-v /etc/kubernetes:/etc/kubernetes k8s.gcr.io/etcd:3.4.13-0 etcdctl \
--cert /etc/kubernetes/pki/etcd/peer.crt \
--key /etc/kubernetes/pki/etcd/peer.key \
--cacert /etc/kubernetes/pki/etcd/ca.crt \
--endpoints https://10.0.0.100:2379 endpoint health --cluster

https://10.0.0.100:2379 is healthy: successfully committed proposal: took = 16.283339ms
https://10.0.0.101:2379 is healthy: successfully committed proposal: took = 19.44402ms
https://10.0.0.102:2379 is healthy: successfully committed proposal: took = 35.926451ms
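
// Optional: the same etcdctl container can list the members to confirm all three nodes joined
docker run --rm -it \
--net host \
-v /etc/kubernetes:/etc/kubernetes k8s.gcr.io/etcd:3.4.13-0 etcdctl \
--cert /etc/kubernetes/pki/etcd/peer.crt \
--key /etc/kubernetes/pki/etcd/peer.key \
--cacert /etc/kubernetes/pki/etcd/ca.crt \
--endpoints https://10.0.0.100:2379 member list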

5. Master installation

// Because this k8s cluster is deployed on the same 3 machines as the etcd cluster, the etcd client certificates do not need to be copied again
// Create the kubeadm config file
[root@master-01 ~]# vim kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.21.2
imageRepository: registry.aliyuncs.com/google_containers
controlPlaneEndpoint: "10.0.0.111:6443"
etcd:
    external:
        endpoints:
        - https://10.0.0.100:2379
        - https://10.0.0.101:2379
        - https://10.0.0.102:2379
        caFile: /etc/kubernetes/pki/etcd/ca.crt
        certFile: /etc/kubernetes/pki/apiserver-etcd-client.crt
        keyFile: /etc/kubernetes/pki/apiserver-etcd-client.key
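
// Optional: pre-pull the control-plane images with the same config to surface registry problems before init
kubeadm config images pull --config kubeadm-config.yaml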

// Initialize the first master node
kubeadm init --config kubeadm-config.yaml --upload-certs

// The Aliyun mirror may not provide the coredns image under the name kubeadm 1.21 expects; pull it from Docker Hub and retag it
docker pull coredns/coredns:1.8.0
docker tag coredns/coredns:1.8.0 registry.aliyuncs.com/google_containers/coredns:v1.8.0
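
// After init succeeds, configure kubectl for the current user as printed in the kubeadm output
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config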

Get the --token used by nodes to join
[root@master-01 ~]# kubeadm token create --print-join-command
kubeadm join 10.0.0.111:6443 --token cweoyn.c6gdm7cai5txqou5 --discovery-token-ca-cert-hash sha256:c8adbaa76eee03ea2a08bca6cd877de8a29cfec13506af95c366b3220049d84a

// Get the --certificate-key. It is printed by "kubeadm init ... --upload-certs" above and can be
// regenerated with "kubeadm init phase upload-certs --upload-certs". The openssl command below
// only recomputes the --discovery-token-ca-cert-hash:
[root@master-01 ~]# openssl x509 -in /etc/kubernetes/pki/ca.crt -noout -pubkey | openssl rsa -pubin -outform DER 2>/dev/null | sha256sum | cut -d' ' -f1
c8adbaa76eee03ea2a08bca6cd877de8a29cfec13506af95c366b3220049d84a

// Join the remaining masters as control-plane nodes
kubeadm join 10.0.0.111:6443 --token cweoyn.c6gdm7cai5txqou5 --discovery-token-ca-cert-hash sha256:c8adbaa76eee03ea2a08bca6cd877de8a29cfec13506af95c366b3220049d84a --control-plane --certificate-key <certificate-key from upload-certs>
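
// Once the remaining masters and the workers have joined, verify from master-01
kubectl get nodes -o wide
kubectl get pods -A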