Installing Kubernetes (1.26.0)

1. Preparation

Three nodes (Rocky Linux 8):

Docker is used as the container engine.

Hostname        IP address
k8s-master1     192.168.10.31
k8s-node1       192.168.10.32
k8s-node2       192.168.10.33

Set the hostnames and the hosts file

# Hostname changes are omitted here (a sketch follows the hosts file); the hosts file:
[root@k8s-master1 ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.10.31 k8s-master1
192.168.10.32 k8s-node1
192.168.10.33 k8s-node2
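
The hostname step skipped above can be done with hostnamectl; a minimal sketch (run the matching line on its own node):

# Run on 192.168.10.31 / .32 / .33 respectively
hostnamectl set-hostname k8s-master1
hostnamectl set-hostname k8s-node1
hostnamectl set-hostname k8s-node2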

Base environment configuration

# Permanently disable SELinux
sed -i 's/enforcing/disabled/g' /etc/selinux/config
# Disable SELinux for the current session
setenforce 0

# Permanently disable the firewall and stop it immediately
systemctl disable firewalld --now  

# Disable swap for the current session
swapoff -a
# Disable swap permanently: comment out the swap line in the file
vim /etc/fstab
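
If you prefer not to open an editor, a one-liner can comment out the entry instead; a sketch, assuming the fstab line has "swap" as a whitespace-separated field:

# Comment out the swap line in /etc/fstab
sed -i '/\sswap\s/s/^/#/' /etc/fstab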

# Configure time synchronization
yum install ntpdate -y
ntpdate ntp.aliyun.com
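
ntpdate performs a one-shot sync only. To keep clocks aligned you could add a cron entry such as the sketch below (chronyd, the Rocky Linux 8 default, is an equally valid alternative):

# Re-sync time hourly via cron
cat > /etc/cron.d/ntpdate <<EOF
0 * * * * root /usr/sbin/ntpdate ntp.aliyun.com
EOF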

# Load the br_netfilter kernel module
modprobe br_netfilter
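
modprobe alone does not survive a reboot, and the kubeadm documentation also expects bridged traffic to be visible to iptables and IP forwarding to be enabled. A sketch of the usual persistent setup:

# Persist the module and apply the sysctls required by kubeadm
cat > /etc/modules-load.d/k8s.conf <<EOF
br_netfilter
EOF
cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
sysctl --system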

# Install and configure ipset and ipvsadm
yum -y install ipset ipvsadm
cat  >/etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
chmod +x /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules 
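
To confirm the modules actually loaded:

lsmod | grep -e ip_vs -e nf_conntrack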

# Add the EPEL repo to make installing packages easier later
yum install -y https://mirrors.aliyun.com/epel/epel-release-latest-8.noarch.rpm
sed -i 's|^#baseurl=https://download.example/pub|baseurl=https://mirrors.aliyun.com|' /etc/yum.repos.d/epel*
sed -i 's|^metalink|#metalink|' /etc/yum.repos.d/epel*


# Update system packages
yum update -y

2. Install Docker and cri-dockerd

Install Docker:

# Installation commands (containerd and docker)
yum install -y https://download.docker.com/linux/centos/8/x86_64/stable/Packages/containerd.io-1.4.3-3.1.el8.x86_64.rpm
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
 
yum install -y docker-ce
systemctl enable docker.service containerd --now

# Verify docker:
docker version

# Configure a registry mirror and the systemd cgroup driver
sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://7dgzta36.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF

sudo systemctl daemon-reload
sudo systemctl restart docker
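
docker info should now report systemd as the cgroup driver:

docker info | grep -i 'cgroup driver'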

Kubernetes removed dockershim in 1.24 and no longer integrates with Docker directly, so the cri-dockerd CRI adapter has to be installed.

Install cri-dockerd

# Install iproute-tc and wget
yum install iproute-tc wget -y

# Enable CRI in containerd:
# in /etc/containerd/config.toml, comment out disabled_plugins = ["cri"]
systemctl restart containerd
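
The manual edit can also be scripted; a sketch that assumes the stock config.toml shipped with the containerd.io rpm (the line starts with disabled_plugins):

# Comment out disabled_plugins = ["cri"] before restarting containerd
sed -i 's/^disabled_plugins/#disabled_plugins/' /etc/containerd/config.toml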


# Install cri-dockerd
wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.0/cri-dockerd-0.3.0-3.el8.x86_64.rpm
rpm -ivh cri-dockerd-0.3.0-3.el8.x86_64.rpm

# Append flags to ExecStart so the pause image is pulled from Aliyun
sed -i 's,^ExecStart.*,& --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.7,' /usr/lib/systemd/system/cri-docker.service

# The resulting line:
ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.7

# Start cri-dockerd
systemctl daemon-reload
systemctl enable  cri-docker.service --now
systemctl enable  cri-docker.socket --now
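
A quick sanity check that the CRI endpoint kubeadm will use is actually available:

systemctl is-active cri-docker.service
ls -l /var/run/cri-dockerd.sock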

3. Install kubeadm, kubelet, and kubectl

Use the Kubernetes yum repo from the Aliyun open-source mirror site:

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# List the versions available in the repo
yum list kubeadm.x86_64 --showduplicates


yum install -y kubelet-1.26.0-0 kubeadm-1.26.0-0 kubectl-1.26.0-0

# To keep the cgroup driver used by Docker consistent with the one used by the kubelet, edit the following file.
vim /etc/sysconfig/kubelet
Set: KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"

systemctl enable kubelet --now

4. Initialize the cluster

kubeadm init \
  --apiserver-advertise-address=192.168.10.31 \
  --image-repository registry.aliyuncs.com/google_containers \
  --kubernetes-version v1.26.0 \
  --service-cidr=10.96.0.0/12 \
  --pod-network-cidr=10.244.0.0/16 \
  --cri-socket unix:///var/run/cri-dockerd.sock \
  --ignore-preflight-errors=all
  • --apiserver-advertise-address: the address the cluster is advertised on
  • --image-repository: the default registry k8s.gcr.io is unreachable from mainland China, so the Aliyun mirror registry is specified
  • --kubernetes-version: the K8s version, matching what was installed above
  • --service-cidr: the cluster-internal virtual network (Service IPs), the unified entry point for reaching Pods
  • --pod-network-cidr: the Pod network; keep it consistent with the CNI component YAML deployed below
  • --cri-socket: points kubeadm at cri-dockerd.sock

Configure KUBECONFIG. Root and regular users do it in different ways (the principle is the same).

The commands:

# Run as a regular user
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Run as root
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile
source ~/.bash_profile

To add new nodes to the cluster, run the kubeadm join command printed in the kubeadm init output:

(the --cri-socket unix:///var/run/cri-dockerd.sock argument must be appended)

kubeadm join 192.168.10.31:6443 --token 77vyru.ahbzvy00hl5o5plk \
        --discovery-token-ca-cert-hash sha256:cfb22821069c326da1373c98f63210d52eaf124971dd9fbf9207dafb52b287ad \
        --cri-socket unix:///var/run/cri-dockerd.sock

The default token is valid for 24 hours. Once it expires, the token can no longer be used and a new one must be created; a single command generates it together with the full join command:

kubeadm token create --print-join-command
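
To inspect the tokens that currently exist and when they expire:

kubeadm token list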

Check the cluster nodes

[root@k8s-master1 ~]# kubectl get nodes
NAME          STATUS     ROLES           AGE     VERSION
k8s-master1   NotReady   control-plane   2m46s   v1.26.0
k8s-node1     NotReady   <none>          23s     v1.26.0
k8s-node2     NotReady   <none>          25s     v1.26.0
# STATUS changes to Ready once a CNI is deployed

5. Configure the Container Network Interface (CNI)

Note: choose either flannel or calico, not both.

flannel

# Download kube-flannel.yml
wget https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml

# If you chose a custom pod CIDR (anything other than 10.244.0.0/16), edit the
# net-conf.json section of kube-flannel.yml first so "Network" matches your range.
# The default content is:
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }

# Apply kube-flannel.yml
kubectl apply -f kube-flannel.yml

Verify

[root@k8s-master1 ~]# kubectl get pod -A
NAMESPACE      NAME                                  READY   STATUS    RESTARTS   AGE
kube-flannel   kube-flannel-ds-ph48b                 1/1     Running   0          6m31s
kube-flannel   kube-flannel-ds-xxnjp                 1/1     Running   0          6m31s
kube-flannel   kube-flannel-ds-zpsmc                 1/1     Running   0          6m31s
kube-system    coredns-5bbd96d687-b6rbw              1/1     Running   0          10m
kube-system    coredns-5bbd96d687-rxn2c              1/1     Running   0          10m
kube-system    etcd-k8s-master1                      1/1     Running   0          11m
kube-system    kube-apiserver-k8s-master1            1/1     Running   0          11m
kube-system    kube-controller-manager-k8s-master1   1/1     Running   0          11m
kube-system    kube-proxy-dd6x7                      1/1     Running   0          10m
kube-system    kube-proxy-kbkcl                      1/1     Running   0          10m
kube-system    kube-proxy-w7kvj                      1/1     Running   0          9m49s
kube-system    kube-scheduler-k8s-master1            1/1     Running   0          11m

calico

# Download the calico manifests
wget https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/tigera-operator.yaml
wget https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/custom-resources.yaml

# Edit line 13 of the file so the cidr matches the range passed to kubeadm init --pod-network-cidr
vim custom-resources.yaml
......
 11     ipPools:
 12     - blockSize: 26
 13       cidr: 10.244.0.0/16
 14       encapsulation: VXLANCrossSubnet
......

# Create the resources
kubectl create -f tigera-operator.yaml
kubectl create -f custom-resources.yaml

Verify

[root@k8s-master1 ~]# kubectl get pod -A
NAMESPACE          NAME                                      READY   STATUS    RESTARTS   AGE
calico-apiserver   calico-apiserver-5dd8b7fb46-7sq98         1/1     Running   0          4m4s
calico-apiserver   calico-apiserver-5dd8b7fb46-s6576         1/1     Running   0          4m4s
calico-system      calico-kube-controllers-594d54f99-g8qqd   1/1     Running   0          8m6s
calico-system      calico-node-698dv                         1/1     Running   0          8m7s
calico-system      calico-node-n5xff                         1/1     Running   0          8m7s
calico-system      calico-node-nt9dd                         1/1     Running   0          8m7s
calico-system      calico-typha-76cc6cf66-9s4sl              1/1     Running   0          8m5s
calico-system      calico-typha-76cc6cf66-rwp4n              1/1     Running   0          8m7s
calico-system      csi-node-driver-72htd                     2/2     Running   0          4m46s
calico-system      csi-node-driver-brlkt                     2/2     Running   0          7m15s
calico-system      csi-node-driver-gzp7l                     2/2     Running   0          6m26s
kube-system        coredns-74586cf9b6-bkr7k                  1/1     Running   0          37m
kube-system        coredns-74586cf9b6-lcffm                  1/1     Running   0          37m
kube-system        etcd-k8s-master1                          1/1     Running   0          37m
kube-system        kube-apiserver-k8s-master1                1/1     Running   0          37m
kube-system        kube-controller-manager-k8s-master1       1/1     Running   0          37m
kube-system        kube-proxy-4t9pt                          1/1     Running   0          35m
kube-system        kube-proxy-82svm                          1/1     Running   0          35m
kube-system        kube-proxy-zsrv6                          1/1     Running   0          37m
kube-system        kube-scheduler-k8s-master1                1/1     Running   0          37m
tigera-operator    tigera-operator-65d6bf4d4f-qstmj          1/1     Running   0          14m

Test the Kubernetes cluster

  • Verify that Pods run
  • Verify Pod network communication
  • Verify DNS resolution

Create a pod in the Kubernetes cluster and verify that it runs normally:

kubectl create deployment nginx --image=nginx
kubectl expose deployment nginx --port=80 --type=NodePort
kubectl get pod,svc

Access URL: http://NodeIP:Port (Port is the NodePort shown by kubectl get svc)
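
The DNS check from the list above can be done with a throwaway pod; busybox:1.28 is used here because its nslookup is known to behave:

kubectl run dns-test --image=busybox:1.28 --rm -it --restart=Never -- nslookup kubernetes.default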

The following is fine in a learning environment:

Remove the taint from the master

# Remove the taint on earlier releases (master role)
kubectl taint nodes --all node-role.kubernetes.io/master-
# Remove the taint on recent releases (control-plane role)
kubectl taint nodes --all node-role.kubernetes.io/control-plane:NoSchedule-
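
Confirm the taint is gone before scheduling workloads onto the master:

kubectl describe node k8s-master1 | grep -i taints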