1. Hosts and Required Software Versions

OS version: CentOS 7

Kubernetes version: v1.20.5

containerd version: 1.4.4

crictl version: v1.20.0

k8s-master: 172.21.204.110

k8s-node03: 172.21.204.113

2. Host Initialization

# Replace CentOS 7's CentOS-Base.repo with the Huawei Cloud mirror (optional)

cp -a /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.bak
wget -O /etc/yum.repos.d/CentOS-Base.repo https://repo.huaweicloud.com/repository/conf/CentOS-7-reg.repo

Or use curl -o instead (if wget is not available):
curl -o /etc/yum.repos.d/CentOS-Base.repo https://repo.huaweicloud.com/repository/conf/CentOS-7-reg.repo
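
After replacing the repo file, rebuilding the yum cache is an optional but common follow-up so the new mirror takes effect:

# Rebuild the yum metadata cache (optional)
yum clean all && yum makecache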

# Configure hostnames in /etc/hosts on all nodes; adjust to your actual environment

vi /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
172.21.204.110 k8s-master
172.21.204.113 k8s-node03

# Verify the MAC address and product_uuid; both must be unique on every node

cat /sys/class/net/ens33/address
cat /sys/class/dmi/id/product_uuid

# Disable the firewall

systemctl stop firewalld && systemctl disable firewalld

# Permanently disable SELinux (a reboot is required for this to take full effect)

setenforce 0
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config

# Disable the swap partition (Kubernetes requires swap to be off)

swapoff -a

# Comment out the swap entry in /etc/fstab
vi /etc/fstab
#/dev/mapper/centos-swap     swap      swap     defaults     0     0

# Confirm that swap is disabled (an empty list means it is disabled)
cat /proc/swaps
Filename                                Type            Size    Used    Priority
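
As an alternative check, free also reports the swap size as 0 once swap is disabled:

free -h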

# Load the required kernel modules

cat <<EOF | sudo tee /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF

sudo modprobe overlay
sudo modprobe br_netfilter

# Load the ipvs kernel modules (optional; kube-proxy defaults to iptables mode)
kube-proxy supports both iptables and ipvs modes. To use ipvs mode, all nodes must load the ipvs kernel modules and have the ipset tool installed before the cluster is initialized. On Linux kernels 4.19 and later, nf_conntrack replaces nf_conntrack_ipv4.

cat > /etc/modules-load.d/ipvs.conf <<EOF
# Load IPVS at boot
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF

systemctl enable --now systemd-modules-load.service

# Confirm the kernel modules are loaded
lsmod | grep -e ip_vs -e nf_conntrack_ipv4
# or
cut -f1 -d " "  /proc/modules | grep -e ip_vs -e nf_conntrack_ipv4

# Install ipset and ipvsadm
yum install -y ipset ipvsadm

# Set sysctl parameters so that iptables can see bridged traffic; these settings persist across reboots

cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables  = 1
net.ipv4.ip_forward                 = 1
vm.swappiness                       = 0
net.bridge.bridge-nf-call-ip6tables = 1
EOF

# Apply the sysctl parameters without rebooting
sudo sysctl --system
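
Optionally verify that the values took effect:

sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward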

3. Install the Kubernetes Components

# Configure the Kubernetes yum repository on all nodes (using the Aliyun mirror)

cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-\$basearch
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# Install kubelet, kubeadm, and kubectl on all nodes

# Install the latest versions (default)
yum install -y kubelet kubeadm kubectl

# Or pin a specific version
yum install -y kubelet-1.20.5-0.x86_64 kubeadm-1.20.5-0.x86_64 kubectl-1.20.5-0.x86_64
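
Optionally confirm the installed versions before moving on:

kubeadm version -o short
kubectl version --client
kubelet --version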

# Configure kubelet to use containerd as its container runtime with cgroupDriver set to systemd (two ways to do this)

Method 1:
# Configure kubelet for containerd (the cgroup-driver=systemd argument must be set on all nodes, otherwise worker nodes cannot pull images and create pods)

cat > /etc/sysconfig/kubelet <<EOF
KUBELET_EXTRA_ARGS=--cgroup-driver=systemd
EOF

Method 2:

# If you prefer not to modify /etc/sysconfig/kubelet, kubeadm init must be given a YAML file that passes the cgroupDriver setting. Export the default initialization configuration with:

kubeadm config print init-defaults > kubeadm-config.yaml

# Then adjust the configuration to your needs, for example change imageRepository and set the kube-proxy mode to ipvs. Because containerd is the runtime, cgroupDriver must be set to systemd when initializing the node:

apiVersion: kubeadm.k8s.io/v1beta2 
bootstrapTokens: 
- groups: 
  - system:bootstrappers:kubeadm:default-node-token 
  token: abcdef.0123456789abcdef 
  ttl: 24h0m0s 
  usages: 
  - signing 
  - authentication 
kind: InitConfiguration 
localAPIEndpoint: 
  advertiseAddress: 172.21.204.110  
  bindPort: 6443 
nodeRegistration: 
  criSocket: /run/containerd/containerd.sock  
  name: k8s-master 
  taints: 
  - effect: NoSchedule 
    key: node-role.kubernetes.io/master 
--- 
apiServer: 
  timeoutForControlPlane: 4m0s 
apiVersion: kubeadm.k8s.io/v1beta2 
certificatesDir: /etc/kubernetes/pki 
clusterName: kubernetes 
controllerManager: {} 
dns: 
  type: CoreDNS 
etcd: 
  local: 
    dataDir: /var/lib/etcd 
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers 
kind: ClusterConfiguration 
kubernetesVersion: v1.20.5 
networking: 
  dnsDomain: cluster.local 
  podSubnet: 192.168.0.0/16
  serviceSubnet: 10.96.0.0/12 
scheduler: {} 
--- 
apiVersion: kubeproxy.config.k8s.io/v1alpha1 
kind: KubeProxyConfiguration 
mode: ipvs 
--- 
apiVersion: kubelet.config.k8s.io/v1beta1 
kind: KubeletConfiguration 
cgroupDriver: systemd
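
With kubeadm-config.yaml in place, the control-plane images can optionally be pre-pulled so that kubeadm init does not have to wait for downloads:

kubeadm config images pull --config kubeadm-config.yaml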

# Enable kubelet to start at boot

systemctl enable --now kubelet && systemctl restart kubelet
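
Note that kubelet will keep restarting until kubeadm init (or kubeadm join) has run; this is expected and can be observed with:

systemctl status kubelet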

4. Install containerd

# Configure the containerd repository on all nodes (the containerd.io package is shipped in the docker-ce repository)

yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

# List the available containerd.io versions

yum search containerd.io --showduplicates

# Install the latest containerd.io

yum install -y containerd.io

# Generate the default containerd configuration file config.toml

mkdir -p /etc/containerd
containerd config default | sudo tee /etc/containerd/config.toml

# Edit the sandbox_image, endpoint, and SystemdCgroup settings in config.toml

vi /etc/containerd/config.toml

[plugins."io.containerd.grpc.v1.cri"] 字段下的sandbox_image修改为如下
sandbox_image="registry.aliyuncs.com/google_containers/pause:3.2"

[plugins."io.containerd.grpc.v1.cri".registry]字段下的endpoint修改为如下
endpoint = ["https://registry.cn-hangzhou.aliyuncs.com"]

[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]字段下的SystemdCgroup修改为如下
SystemdCgroup = true

# Or make all three changes at once with sed

sed -i "s#k8s.gcr.io#registry.aliyuncs.com/google_containers#g" /etc/containerd/config.toml
sed -i '/containerd.runtimes.runc.options/a\ \ \ \ \ \ \ \ \ \ \ \ SystemdCgroup = true' /etc/containerd/config.toml
sed -i "s#https://registry-1.docker.io#https://registry.cn-hangzhou.aliyuncs.com#g" /etc/containerd/config.toml

# Restart containerd

systemctl daemon-reload
systemctl enable containerd && systemctl restart containerd

# Install the CRI client tool crictl
Download page: https://github.com/kubernetes-sigs/cri-tools/releases/

wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.20.0/crictl-v1.20.0-linux-amd64.tar.gz

sudo tar zxvf crictl-v1.20.0-linux-amd64.tar.gz -C /usr/local/bin

cat > /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false
EOF

# Or set the endpoint with the following command
crictl config runtime-endpoint unix:///run/containerd/containerd.sock

# Verify that crictl works

crictl pull nginx
crictl rmi nginx
crictl images

# Restart containerd and kubelet

systemctl daemon-reload
systemctl restart containerd && systemctl restart kubelet

# Finally, confirm the containerd and kubelet versions

containerd --version
kubelet --version

5. Initialize the Cluster on the Master Node

Method 1: initialize with the configuration file

kubeadm init --config=kubeadm-config.yaml

Method 2: initialize with command-line flags (specify the version, master address, image repository, and custom CIDRs)

kubeadm init --kubernetes-version=v1.20.5 --apiserver-advertise-address=172.21.204.110 --image-repository registry.cn-hangzhou.aliyuncs.com/google_containers --service-cidr=10.96.0.0/16 --pod-network-cidr=192.168.0.0/16

Simplified initialization (when using the Calico network plugin)
kubeadm init --image-repository registry.aliyuncs.com/google_containers --pod-network-cidr=192.168.0.0/16

Simplified initialization (when using the Flannel network plugin)
kubeadm init --image-repository registry.aliyuncs.com/google_containers --pod-network-cidr=10.244.0.0/16

# --image-repository: specifies the registry to pull the control-plane images from

# Run the initialization (using the Calico network plugin here)

kubeadm init --image-repository registry.aliyuncs.com/google_containers --pod-network-cidr=192.168.0.0/16

# After initialization completes, run the following as well, otherwise kubectl will fail with a port 8080 connection error:

# For a non-root user
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config

# For the root user
export KUBECONFIG=/etc/kubernetes/admin.conf
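
A quick check that kubectl can now reach the API server (the master will show NotReady until the network plugin is deployed in the next step):

kubectl cluster-info
kubectl get nodes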

6. Deploy the Calico Network Plugin (default mode)

[root@k8s-master ~]# kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.apps/calico-node created
serviceaccount/calico-node created
deployment.apps/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
poddisruptionbudget.policy/calico-kube-controllers created
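
Optionally watch the Calico and CoreDNS pods until they reach Running (Ctrl-C to stop watching):

kubectl get pods -n kube-system -w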

7. Join the Worker Nodes

kubeadm join 172.21.204.110:6443 --token 7pw2tl.x2121e9p3dgr7ktb \
    --discovery-token-ca-cert-hash sha256:694e2924e2cf2211cefba492faf0259d0810d6afb72c5e941120b2911d8d18ab

Join the node with the default command; there is no need to pass --cri-socket /run/containerd/containerd.sock to select containerd as the CRI, because kubeadm automatically detects and uses the containerd runtime.
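
If the original token has expired (tokens are valid for 24 hours by default), a fresh join command can be generated on the master:

kubeadm token create --print-join-command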

# Check node status

[root@k8s-master ~]# kubectl get node
NAME         STATUS   ROLES                  AGE     VERSION
k8s-master   Ready    control-plane,master   3d21h   v1.20.5
k8s-node03   Ready    <none>                 3d21h   v1.20.5

# Check pod status

[root@k8s-master ~]# kubectl get pod -A
NAMESPACE              NAME                                         READY   STATUS      RESTARTS   AGE
kube-system            calico-kube-controllers-69496d8b75-drq6n     1/1     Running     0          3d21h
kube-system            calico-node-4x5rf                            1/1     Running     0          3d21h
kube-system            calico-node-8bn96                            1/1     Running     0          3d21h
kube-system            coredns-7f89b7bc75-7pzx8                     1/1     Running     0          3d21h
kube-system            coredns-7f89b7bc75-8frqs                     1/1     Running     0          3d21h
kube-system            etcd-k8s-master                              1/1     Running     0          3d21h
kube-system            kube-apiserver-k8s-master                    1/1     Running     0          3d21h
kube-system            kube-controller-manager-k8s-master           1/1     Running     0          3d21h
kube-system            kube-proxy-4hxhl                             1/1     Running     0          3d20h
kube-system            kube-proxy-bshbb                             1/1     Running     0          3d20h
kube-system            kube-scheduler-k8s-master                    1/1     Running     0          3d21h

8. Enable ipvs Mode for kube-proxy (optional)

# Edit the kube-proxy ConfigMap and set mode to "ipvs"
[root@k8s-master ~]# kubectl -n kube-system edit cm kube-proxy

# Confirm the change
[root@k8s-master ~]# kubectl -n kube-system get cm kube-proxy -o yaml | grep mode
    mode: "ipvs"

# Restart the kube-proxy pods
[root@k8s-master ~]# kubectl -n kube-system delete pods -l k8s-app=kube-proxy
# Confirm that ipvs mode took effect
[root@k8s-master ~]# kubectl -n kube-system logs kube-proxy-4hxhl | grep ipvs
I0329 08:23:06.791059       1 server_others.go:258] Using ipvs Proxier.
[root@k8s-master ~]# kubectl -n kube-system logs kube-proxy-bshbb | grep ipvs
I0329 08:23:18.380155       1 server_others.go:258] Using ipvs Proxier.

The "Using ipvs Proxier" line in the logs confirms that ipvs mode is enabled.

# Check the ipvs forwarding table

[root@k8s-master ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.96.0.1:443 rr
  -> 172.21.204.110:6443          Masq    1      0          0
TCP  10.96.0.10:53 rr
  -> 192.168.235.193:53           Masq    1      0          0
  -> 192.168.235.194:53           Masq    1      0          0
TCP  10.96.0.10:9153 rr
  -> 192.168.235.193:9153         Masq    1      0          0
  -> 192.168.235.194:9153         Masq    1      0          0
TCP  10.109.203.63:80 rr
  -> 192.168.135.130:80           Masq    1      0          5
UDP  10.96.0.10:53 rr
  -> 192.168.235.193:53           Masq    1      0          0
  -> 192.168.235.194:53           Masq    1      0          0
