1 Environment preparation before deployment

I am using three servers. The environment-preparation steps in this section must be performed on all three.

1.1 Disable the firewall and SELinux

systemctl disable firewalld --now
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/g' /etc/selinux/config
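
A quick check that both changes took effect:

systemctl is-enabled firewalld
getenforce

The first should print disabled, the second Permissive (it becomes Disabled after a reboot).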

1.2 Disable swap and comment out the swap mount in /etc/fstab

swapoff -a
[root@k8s-master ~]# vim /etc/fstab


#
# /etc/fstab
# Created by anaconda on Sun Jun 26 14:22:46 2022
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/centos-root /                       xfs     defaults        0 0
UUID=717e495b-061b-44d9-b5a6-e4e2f92ee30a /boot                   xfs     defaults        0 0
#/dev/mapper/centos-swap swap                    swap    defaults        0 0
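
If you prefer not to edit the file by hand, a sed one-liner comments out the swap entry (a sketch; double-check /etc/fstab afterwards):

sed -ri '/\sswap\s/ s/^#?/#/' /etc/fstab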

1.3 Synchronize cluster time

yum -y install ntpdate

Next, synchronize the time against an Alibaba Cloud server or a Shanghai Telecom one. There are plenty of options; I will only list these two.

ntpdate -u ntp.api.bz && hwclock -w

Note: -w writes the time to the hardware clock.
Or use this alternative method; either one of the two is fine:

ntpdate ntp.aliyun.com
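
ntpdate only sets the clock once. To keep the nodes in sync over time, one option is a cron entry (a sketch using root's crontab):

(crontab -l 2>/dev/null; echo "0 */6 * * * /usr/sbin/ntpdate ntp.aliyun.com") | crontab -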

1.4 Set hostnames and configure name resolution

vim /etc/hosts

192.168.123.150 k8s-master
192.168.123.151 k8s-node1
192.168.123.139 k8s-node2

Set the hostname (run the matching command on each node):

hostnamectl set-hostname k8s-master
hostnamectl set-hostname k8s-node1
hostnamectl set-hostname k8s-node2
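
A quick sanity check that the hostname stuck and the names resolve (for example, from the master):

hostname
ping -c 1 k8s-node1
ping -c 1 k8s-node2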

1.5 If the IP is assigned via DHCP, change it to static (my NIC is ens33; check your own NIC name)

vim /etc/sysconfig/network-scripts/ifcfg-ens33

TYPE="Ethernet"
PROXY_METHOD="none"
BROWSER_ONLY="no"
BOOTPROTO="static"
DEFROUTE="yes"
NAME="ens33"
IPADDR=192.168.123.150
PREFIX=24
GATEWAY=192.168.123.2
DEVICE="ens33"
ONBOOT="yes"
DNS1="114.114.114.114"
DNS2="8.8.8.8"

After editing, restart the network service:

systemctl restart network

Note: I deleted the IPV4_/IPV6_-prefixed lines and the UUID line; they are regenerated automatically. Repeat the edit on all three machines, filling in each machine's own IPADDR (mine are private IPs, not public ones).

2 Install Docker (skip step 2.1 if you already have a Docker environment; run on all three servers)

2.1 Install Docker

yum install -y yum-utils device-mapper-persistent-data lvm2 git
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install docker-ce -y

Then start Docker and set it to start at boot.
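
One command covers both, using the same systemctl pattern as in step 1.1:

systemctl enable docker --now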

2.2 Pull the images from the Alibaba Cloud registry

docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.20.2
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.20.2
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.20.2
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.20.2
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.7.0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.4.13-0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2

2.3 Pull the flannel image

docker pull quay.io/coreos/flannel:v0.14.0

2.4 Retag every image pulled from Alibaba Cloud; the later steps will not succeed otherwise

docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.20.2 k8s.gcr.io/kube-controller-manager:v1.20.2
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.20.2 k8s.gcr.io/kube-proxy:v1.20.2
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.20.2 k8s.gcr.io/kube-apiserver:v1.20.2
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.20.2 k8s.gcr.io/kube-scheduler:v1.20.2
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.7.0 k8s.gcr.io/coredns:1.7.0
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.4.13-0 k8s.gcr.io/etcd:3.4.13-0
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2 k8s.gcr.io/pause:3.2
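
If typing each pair out is tedious, a short loop is equivalent to the pull and tag commands above (a sketch):

for img in kube-controller-manager:v1.20.2 kube-proxy:v1.20.2 kube-apiserver:v1.20.2 \
           kube-scheduler:v1.20.2 coredns:1.7.0 etcd:3.4.13-0 pause:3.2; do
  docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/$img
  docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/$img k8s.gcr.io/$img
done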

3 Install kubeadm and kubelet on all three servers

3.1 Configure the yum repository

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
EOF
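
To confirm the repo is reachable and carries the version we want:

yum list kubelet --showduplicates | grep 1.20.2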

3.2 Install the matching versions

yum install -y kubelet-1.20.2-0.x86_64 kubeadm-1.20.2-0.x86_64 kubectl-1.20.2-0.x86_64 ipvsadm
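
After installation, a quick version check; both should report v1.20.2:

kubeadm version -o short
kubelet --version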

4 Load the required kernel modules at boot and configure forwarding parameters

4.1 Edit /etc/rc.local and append the module-loading lines

[root@k8s-master ~]# vim /etc/rc.local
modprobe ip_vs
modprobe ip_vs_rr
modprobe ip_vs_wrr
modprobe ip_vs_sh
modprobe nf_conntrack_ipv4

Make the file executable:

chmod +x /etc/rc.local

4.2 Configure forwarding parameters

cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
vm.swappiness=0
EOF
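
The two bridge settings depend on the br_netfilter module. The reboot below normally takes care of this, but if you want the parameters active immediately, you can load the module and apply the file by hand:

modprobe br_netfilter
sysctl --system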

After all nodes have completed the steps above, reboot them.

4.3 After the reboot, check that the modules loaded successfully

lsmod | grep ip_vs

You should see ip_vs, ip_vs_rr, ip_vs_wrr, and ip_vs_sh in the output.

5 Configure and start kubelet (run on all nodes)

5.1 Capture Docker's cgroup driver in a variable

DOCKER_CGROUPS=`docker info |grep 'Cgroup' | awk ' NR==1 {print $3}'`
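
You can echo the variable to confirm it picked up the driver (typically cgroupfs or systemd):

echo $DOCKER_CGROUPS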

5.2 Configure kubelet's cgroup driver

cat >/etc/sysconfig/kubelet<<EOF
KUBELET_EXTRA_ARGS="--cgroup-driver=$DOCKER_CGROUPS --pod-infra-container-image=k8s.gcr.io/pause:3.2"
EOF

5.3 Enable kubelet at boot and reload systemd

systemctl daemon-reload
systemctl enable kubelet && systemctl restart kubelet

Run everything above on all three servers. What comes next is the key step: on the master node you must initialize the cluster with kubeadm, otherwise kubelet cannot start successfully.

5.4 Initialize the master node

kubeadm init --kubernetes-version=v1.20.2 --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=192.168.123.150 --ignore-preflight-errors=Swap

Note: --kubernetes-version is determined by the images you pulled. The --pod-network-cidr value shown here does not need to change, but --apiserver-advertise-address does: set it to the master node's IP.

During initialization, the output prompts you to run three commands; just copy and paste them. The command for joining the cluster appears in the last few lines of the output; copy it and keep it for later.
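
For reference, the three commands kubeadm prints are the standard kubeconfig setup:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
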
The other nodes do not need to be initialized; running the copied kubeadm join command on them is enough to join the cluster.

6 Configure the network plugin (flannel handles pod-to-pod communication)

6.1 Download the kube-flannel.yml file

(This may require a proxy to reach GitHub. It has always downloaded fine for me, but just in case, I am including my own copy of the file below.)

curl -O https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

For some reason I cannot attach a YAML file here, so I am pasting it into a code block:

---
kind: Namespace
apiVersion: v1
metadata:
  name: kube-flannel
  labels:
    pod-security.kubernetes.io/enforce: privileged
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-flannel
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-flannel
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-flannel
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-flannel
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni-plugin
       #image: flannelcni/flannel-cni-plugin:v1.1.0 for ppc64le and mips64le (dockerhub limitations may apply)
        image: docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
        command:
        - cp
        args:
        - -f
        - /flannel
        - /opt/cni/bin/flannel
        volumeMounts:
        - name: cni-plugin
          mountPath: /opt/cni/bin
      - name: install-cni
       #image: flannelcni/flannel:v0.19.0 for ppc64le and mips64le (dockerhub limitations may apply)
        image: quay.io/coreos/flannel:v0.14.0
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
       #image: flannelcni/flannel:v0.19.0 for ppc64le and mips64le (dockerhub limitations may apply)
        image: quay.io/coreos/flannel:v0.14.0
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: EVENT_QUEUE_DEPTH
          value: "5000"
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
        - name: xtables-lock
          mountPath: /run/xtables.lock
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni-plugin
        hostPath:
          path: /opt/cni/bin
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
      - name: xtables-lock
        hostPath:
          path: /run/xtables.lock
          type: FileOrCreate

6.2 Change the flannel image version in the YAML file

Because the flannel image I pulled earlier is v0.14.0, the two flannel image references (in the install-cni init container and the kube-flannel container) must point at quay.io/coreos/flannel:v0.14.0; the file pasted above already has both changed.
(All of step 6 only needs to run on the master node; the worker nodes just join the cluster afterwards.)

7 Apply the modified flannel file

I put the flannel file under /root; adjust the path to match your own.

kubectl apply -f ~/kube-flannel.yml

Wait a while, then check the pods. Note that the manifest above deploys flannel into the kube-flannel namespace, while the control-plane pods live in kube-system:

kubectl get pods --namespace kube-flannel
kubectl get pods --namespace kube-system

8 Join the other nodes to the cluster

On each of the other nodes, run the kubeadm join command you copied from the last few lines of the kubeadm init output in step 5.4:

kubeadm join 192.168.123.150:6443 --token n4mhv4.u2i0we9jumwvyvnp     --discovery-token-ca-cert-hash sha256:3a06212d370ab4fed86975841502c063c468ce4e63eadac1fcddffdb56aa7114 

On the master node, check whether the other nodes joined successfully:

kubectl get nodes

The result:

[root@k8s-master ~]# kubectl get nodes
NAME         STATUS   ROLES                  AGE    VERSION
k8s-master   Ready    control-plane,master   126m   v1.20.2
k8s-node1    Ready    <none>                 111m   v1.20.2
k8s-node2    Ready    <none>                 111m   v1.20.2

When every node shows Ready, the cluster is up.
If pulling all those images feels like too much to remember, you can install kubeadm first and run kubeadm config images list to list the containers a Kubernetes deployment needs:

kubeadm config images list

The result is the image list. Note: what this shows is the image list to pull for the newest release (a handy method when you forget which images are needed before deploying). I used version 1.20.2 for this experiment, so if you installed following my steps, the command should display the versions we already installed.
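
If you want the list for a specific release rather than relying on the installed default, the command accepts a version flag:

kubeadm config images list --kubernetes-version v1.20.2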

That's a wrap!
