Installing Kubernetes 1.20.15 with kubeadm

1. Basic Environment Setup

1.1 Configuration Details
OS version          CentOS 7.9
Docker version      19.03.x
Kubernetes version  1.20.x
Pod CIDR            172.168.0.0/16
Service CIDR        10.96.0.0/16
1.2 Edit /etc/hosts on All Nodes
vim /etc/hosts

192.168.20.81 k8s-master01
192.168.20.82 k8s-master02
192.168.20.83 k8s-master03
192.168.20.236 k8s-master-lb # for a non-HA cluster, use Master01's IP here
192.168.20.84 k8s-node01
192.168.20.85 k8s-node02
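
If you prefer a non-interactive approach, the same entries can be appended with a here-doc (a minimal sketch; it assumes the entries are not already present):

cat >> /etc/hosts <<'EOF'
192.168.20.81 k8s-master01
192.168.20.82 k8s-master02
192.168.20.83 k8s-master03
192.168.20.236 k8s-master-lb
192.168.20.84 k8s-node01
192.168.20.85 k8s-node02
EOF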
1.3 Configure yum Repositories
curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo

yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sed -i 's+download.docker.com+mirrors.aliyun.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
1.4 Install Required Tools
yum install wget jq psmisc vim net-tools telnet yum-utils device-mapper-persistent-data lvm2 git -y
1.5 Disable firewalld, SELinux, NetworkManager, and Swap on All Nodes
systemctl disable --now firewalld 
systemctl disable --now NetworkManager

swapoff -a && sysctl -w vm.swappiness=0
sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab

setenforce 0
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/sysconfig/selinux
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
1.6 Configure Time Synchronization on All Nodes
yum install ntpdate -y
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
echo 'Asia/Shanghai' >/etc/timezone
ntpdate time2.aliyun.com
*/5 * * * * /usr/sbin/ntpdate time2.aliyun.com    # add this line to the crontab
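
The entry can also be installed without an interactive editor (a sketch; it assumes no conflicting crontab entry exists):

(crontab -l 2>/dev/null; echo "*/5 * * * * /usr/sbin/ntpdate time2.aliyun.com") | crontab -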
1.7 Configure Resource Limits on All Nodes
ulimit -SHn 65535

vim /etc/security/limits.conf   # append the following at the end
* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
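
Note the hard nofile limit must be at least as large as the soft limit, which is why both are set to 655360 above. The new limits apply only to new login sessions; after logging in again, a quick check (assuming pam_limits is enabled, the CentOS 7 default):

ulimit -n    # expect 655360
ulimit -u    # expect 655350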
1.8 Configure Passwordless SSH from Master01 to the Other Nodes
ssh-keygen -t rsa

for i in k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02;do ssh-copy-id -i .ssh/id_rsa.pub $i;done
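
A quick verification loop; each node should print its hostname without prompting for a password:

for i in k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02;do ssh $i hostname;done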
1.9 Upgrade the OS and Kernel on All Nodes
yum update -y --exclude=kernel* && reboot   # upgrade the OS without touching the kernel

wget http://193.49.22.109/elrepo/kernel/el7/x86_64/RPMS/kernel-ml-devel-4.19.12-1.el7.elrepo.x86_64.rpm
wget http://193.49.22.109/elrepo/kernel/el7/x86_64/RPMS/kernel-ml-4.19.12-1.el7.elrepo.x86_64.rpm

yum localinstall -y kernel-ml*

# make the new kernel the default boot entry
grub2-set-default  0 && grub2-mkconfig -o /etc/grub2.cfg
grubby --args="user_namespace.enable=1" --update-kernel="$(grubby --default-kernel)"

grubby --default-kernel   # verify the default kernel before rebooting
reboot
uname -a    # verify the running kernel after the reboot
1.10 Install ipvsadm on All Nodes
yum install ipvsadm ipset sysstat conntrack libseccomp -y

modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack

vim /etc/modules-load.d/ipvs.conf 
  ip_vs
  ip_vs_lc
  ip_vs_wlc
  ip_vs_rr
  ip_vs_wrr
  ip_vs_lblc
  ip_vs_lblcr
  ip_vs_dh
  ip_vs_sh
  ip_vs_fo
  ip_vs_nq
  ip_vs_sed
  ip_vs_ftp
  nf_conntrack
  ip_tables
  ip_set
  xt_set
  ipt_set
  ipt_rpfilter
  ipt_REJECT
  ipip

systemctl enable --now systemd-modules-load.service
1.11 Enable the Kernel Parameters Required by Kubernetes on All Nodes
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
sysctl --system
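
Note: the net.bridge.* keys require the br_netfilter kernel module. If sysctl --system warns that they are unknown, load the module and persist it (this step is an addition, not part of the original tutorial, but is commonly needed on a fresh 4.19 kernel):

modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
sysctl --system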
reboot
lsmod | grep --color=auto -e ip_vs -e nf_conntrack    # after the reboot, confirm the ipvs and conntrack modules are loaded

2. Basic Component Installation

2.1 Install docker-ce
yum list docker-ce --showduplicates | sort -r
yum install docker-ce-19.03.* docker-ce-cli-19.03.* -y

mkdir -p /etc/docker
cat > /etc/docker/daemon.json <<EOF    # switch Docker's cgroup driver to systemd
{
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF

systemctl daemon-reload && systemctl enable --now docker    # enable Docker on boot and start it on all nodes
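
Docker should now report the systemd cgroup driver, matching the kubelet flag set in the next step:

docker info | grep -i 'cgroup driver'    # expect: Cgroup Driver: systemd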
2.2 Install kubeadm, kubelet, and kubectl
yum list kubeadm.x86_64 --showduplicates | sort -r
yum install kubeadm-1.20* kubelet-1.20* kubectl-1.20* -y

cat >/etc/sysconfig/kubelet<<EOF    # point the pause image at a domestic mirror
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.2"
EOF

systemctl daemon-reload
systemctl enable --now kubelet    # kubelet will restart in a loop until the node is initialized or joined; this is expected
2.3 Install the HA Components on the Master Nodes
yum install keepalived haproxy -y
# On all master nodes, replace the original file contents with the following
vim /etc/haproxy/haproxy.cfg

global
  maxconn  2000
  ulimit-n  16384
  log  127.0.0.1 local0 err
  stats timeout 30s

defaults
  log global
  mode  http
  option  httplog
  timeout connect 5000
  timeout client  50000
  timeout server  50000
  timeout http-request 15s
  timeout http-keep-alive 15s

frontend monitor-in
  bind *:33305
  mode http
  option httplog
  monitor-uri /monitor

frontend k8s-master
  bind 0.0.0.0:16443
  bind 127.0.0.1:16443
  mode tcp
  option tcplog
  tcp-request inspect-delay 5s
  default_backend k8s-master

backend k8s-master
  mode tcp
  option tcplog
  option tcp-check
  balance roundrobin
  default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
  server k8s-master01	192.168.20.81:6443  check
  server k8s-master02	192.168.20.82:6443  check
  server k8s-master03	192.168.20.83:6443  check
# master01 configuration: replace the original contents with the following
vim /etc/keepalived/keepalived.conf     

! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
    script_user root
    enable_script_security
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state MASTER
    interface ens33
    mcast_src_ip 192.168.20.81
    virtual_router_id 51
    priority 101
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.20.236
    }
    track_script {
       chk_apiserver
    }
}
# master02 configuration: replace the original contents with the following
vim /etc/keepalived/keepalived.conf     

! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
    script_user root
    enable_script_security
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    mcast_src_ip 192.168.20.82
    virtual_router_id 51
    priority 100
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.20.236
    }
    track_script {
       chk_apiserver
    }
}
# master03 configuration: replace the original contents with the following
vim /etc/keepalived/keepalived.conf     

! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
    script_user root
    enable_script_security
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    mcast_src_ip 192.168.20.83
    virtual_router_id 51
    priority 100
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.20.236
    }
    track_script {
       chk_apiserver
    }
}
# On all master nodes, create the keepalived health-check script: if haproxy dies, keepalived is stopped so the VIP fails over to another master
vim /etc/keepalived/check_apiserver.sh

#!/bin/bash
# Probe up to three times for a running haproxy process.
err=0
for k in $(seq 1 3)
do
    check_code=$(pgrep haproxy)
    if [[ $check_code == "" ]]; then
        err=$(expr $err + 1)
        sleep 1
        continue
    else
        err=0
        break
    fi
done

# If haproxy never appeared, stop keepalived so the VIP moves to another master.
if [[ $err != "0" ]]; then
    echo "systemctl stop keepalived"
    /usr/bin/systemctl stop keepalived
    exit 1
else
    exit 0
fi
chmod +x /etc/keepalived/check_apiserver.sh
systemctl daemon-reload
systemctl enable --now haproxy
systemctl enable --now keepalived

# Test the VIP
ping 192.168.20.236 -c 4
telnet 192.168.20.236 16443
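
It is also worth checking that both HA services are active and identifying which master currently holds the VIP (ens33 is the interface name used in the keepalived configs above):

systemctl is-active haproxy keepalived
ip addr show ens33 | grep 192.168.20.236    # prints a line only on the node holding the VIP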

3. Cluster Initialization

3.1 Initialization
vim kubeadm-config.yaml     # create this file on master01

apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: 7t2weq.bjbawausm0jaxury
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.20.81
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master01
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  certSANs:
  - 192.168.20.236
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: 192.168.20.236:16443
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.20.15
networking:
  dnsDomain: cluster.local
  podSubnet: 172.168.0.0/16
  serviceSubnet: 10.96.0.0/16
scheduler: {}
kubeadm config migrate --old-config kubeadm-config.yaml --new-config new.yaml   # migrate the config to the schema of the installed kubeadm
kubeadm config images pull --config /root/new.yaml  # pre-pull the images
systemctl enable --now kubelet
kubeadm init --config /root/new.yaml  --upload-certs    # initialize the cluster

# after a successful init, run these so kubectl works on the master
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config

# kubectl command completion
yum -y install bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc

kubeadm reset -f ; ipvsadm --clear  ; rm -rf ~/.kube    # if initialization fails, reset and initialize again
3.2 Join Additional Master and Worker Nodes
# join a master node
kubeadm join 192.168.20.236:16443 --token fgtxr1.bz6dw1tci1kbj977     --discovery-token-ca-cert-hash sha256:06ebf46458a41922ff1f5b3bc49365cf3dd938f1a7e3e4a8c8049b5ec5a3aaa5 \
    --control-plane --certificate-key 03f99fb57e8d5906e4b18ce4b737ce1a055de1d144ab94d3cdcf351dfcd72a8b

# join a worker node
kubeadm join 192.168.20.236:16443 --token 7t2weq.bjbawausm0jaxury \
    --discovery-token-ca-cert-hash sha256:8c92ecb336be2b9372851a9af2c7ca1f7f60c12c68f6ffe1eb513791a1b8a908

kubeadm token create --print-join-command   # if the token has expired, generate a new join command

kubeadm init phase upload-certs  --upload-certs     # a master join additionally needs a fresh --certificate-key
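
A sketch combining the two commands above into a ready-to-run control-plane join command (it assumes the certificate key is the last line of the upload-certs output, which is how kubeadm 1.20 prints it):

JOIN=$(kubeadm token create --print-join-command)
CERT_KEY=$(kubeadm init phase upload-certs --upload-certs | tail -1)
echo "${JOIN} --control-plane --certificate-key ${CERT_KEY}"    # run the printed command on the new master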
3.3 Install Calico on master01 (copy these commands one line at a time; pasting the whole block is error-prone)
git clone https://github.com/dotbalo/k8s-ha-install.git
cd /root/k8s-ha-install && git checkout manual-installation-v1.20.x && cd calico/

sed -i 's#etcd_endpoints: "http://<ETCD_IP>:<ETCD_PORT>"#etcd_endpoints: "https://192.168.20.81:2379,https://192.168.20.82:2379,https://192.168.20.83:2379"#g' calico-etcd.yaml

ETCD_CA=`cat /etc/kubernetes/pki/etcd/ca.crt | base64 | tr -d '\n'`
ETCD_CERT=`cat /etc/kubernetes/pki/etcd/server.crt | base64 | tr -d '\n'`
ETCD_KEY=`cat /etc/kubernetes/pki/etcd/server.key | base64 | tr -d '\n'`
sed -i "s@# etcd-key: null@etcd-key: ${ETCD_KEY}@g; s@# etcd-cert: null@etcd-cert: ${ETCD_CERT}@g; s@# etcd-ca: null@etcd-ca: ${ETCD_CA}@g" calico-etcd.yaml

sed -i 's#etcd_ca: ""#etcd_ca: "/calico-secrets/etcd-ca"#g; s#etcd_cert: ""#etcd_cert: "/calico-secrets/etcd-cert"#g; s#etcd_key: "" #etcd_key: "/calico-secrets/etcd-key" #g' calico-etcd.yaml

POD_SUBNET=`cat /etc/kubernetes/manifests/kube-controller-manager.yaml | grep cluster-cidr= | awk -F= '{print $NF}'`

sed -i 's@# - name: CALICO_IPV4POOL_CIDR@- name: CALICO_IPV4POOL_CIDR@g; s@#   value: "172.168.0.0/16"@  value: '"${POD_SUBNET}"'@g' calico-etcd.yaml   # make sure the CIDR substitution succeeded

kubectl apply -f calico-etcd.yaml
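
Calico takes a minute or two to roll out; the deployment is healthy once every calico-node pod is Running and all nodes report Ready:

kubectl get pods -n kube-system | grep calico
kubectl get nodes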
3.4 Deploy Metrics Server
scp /etc/kubernetes/pki/front-proxy-ca.crt k8s-node01:/etc/kubernetes/pki/front-proxy-ca.crt
scp /etc/kubernetes/pki/front-proxy-ca.crt k8s-node02:/etc/kubernetes/pki/front-proxy-ca.crt    # repeat for any remaining nodes

cd /root/k8s-ha-install/metrics-server-0.4.x-kubeadm/
kubectl  create -f comp.yaml 
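
Once the metrics-server pod is Running, resource metrics should appear after a short delay:

kubectl top node
kubectl top pod -n kube-system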
3.5 Deploy the Dashboard (Optional)
# install the version pinned in the repo
cd /root/k8s-ha-install/dashboard/
kubectl  create -f .

# or install the latest release
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.3/aio/deploy/recommended.yaml
# create an administrator user

vim admin.yaml    

apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding 
metadata: 
  name: admin-user
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system


kubectl apply -f admin.yaml -n kube-system
# Log in to the dashboard

kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard
    # change type: ClusterIP to type: NodePort

kubectl get svc kubernetes-dashboard -n kubernetes-dashboard    # check the assigned NodePort

kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')    # retrieve the login token

--test-type --ignore-certificate-errors    # if Chrome refuses to open the dashboard, right-click the Chrome shortcut and append these flags to the end of the Target field

4. Configuration Changes

4.1 Required Changes
kubectl edit cm kube-proxy -n kube-system   # switch kube-proxy to ipvs mode
    mode: "ipvs"

kubectl patch daemonset kube-proxy -p "{\"spec\":{\"template\":{\"metadata\":{\"annotations\":{\"date\":\"`date +'%s'`\"}}}}}" -n kube-system   # roll the kube-proxy pods so they pick up the change

curl 127.0.0.1:10249/proxyMode    # verify the kube-proxy mode; should print ipvs
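
With ipvs active, ipvsadm (installed in section 1.10) can display the virtual service table; the service addresses should map to pod and node endpoints:

ipvsadm -Ln    # expect entries such as 10.96.0.1:443 -> 192.168.20.81:6443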
4.2 Optional Change (Remove the Master Taint)
kubectl  describe node -l node-role.kubernetes.io/master=  | grep Taints    # view the master nodes' taints
kubectl  taint node  -l node-role.kubernetes.io/master node-role.kubernetes.io/master:NoSchedule-    # remove the taint so pods can schedule on the masters