I. Prepare the Machines

Five CentOS 8.2 machines (one master, four workers), each with at least 2 CPU cores, 2 GB of RAM, and 20 GB of disk; kernel version (uname -r) above 3.10.
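
A quick sanity check on each machine (a minimal sketch; expected values follow the requirements above):

nproc      # CPU cores, expect >= 2
free -h    # memory, expect >= 2G
uname -r   # kernel version (CentOS 8.2 ships 4.18, well above 3.10)
df -h /    # root disk, expect >= 20G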

Hostname    Role      IP
worker01    master    192.168.142.21
worker02    node1     192.168.142.22
worker03    node2     192.168.142.23
worker04    node3     192.168.142.24
worker05    node4     192.168.142.25

Change the hostnames

 192.168.142.21
 hostnamectl --transient set-hostname worker01   # transient change
 hostnamectl --static set-hostname worker01      # permanent change
 192.168.142.22
 hostnamectl --transient set-hostname worker02   # transient change
 hostnamectl --static set-hostname worker02      # permanent change
 192.168.142.23
 hostnamectl --transient set-hostname worker03   # transient change
 hostnamectl --static set-hostname worker03      # permanent change
 192.168.142.24
 hostnamectl --transient set-hostname worker04   # transient change
 hostnamectl --static set-hostname worker04      # permanent change
 192.168.142.25
 hostnamectl --transient set-hostname worker05   # transient change
 hostnamectl --static set-hostname worker05      # permanent change
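
To verify, hostnamectl with no arguments prints both the static and transient hostnames:

hostnamectl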

Configure a static IP address

[root@worker1 ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens33
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens33
#UUID=ce34c058-b578-483f-a6eb-39bb9614385c
DEVICE=ens33
ONBOOT=yes
IPADDR=192.168.142.21
NETMASK=255.255.255.0
GATEWAY=192.168.142.2
DNS1=192.168.142.2
DNS2=114.114.114.114
IPV6_PRIVACY=no
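
On CentOS 8 the ifcfg files are managed by NetworkManager, so one way to apply the changes without a reboot (assuming the connection is named ens33, as above) is:

nmcli connection reload
nmcli connection up ens33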

Hostname resolution

[root@worker1 ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6

192.168.142.20 worker0
192.168.142.21 worker1
192.168.142.22 worker2
192.168.142.23 worker3
192.168.142.24 worker4
192.168.142.25 worker5
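
A quick reachability check from any host, using the names defined above:

for h in worker1 worker2 worker3 worker4 worker5; do ping -c 1 -W 1 $h; done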

II. Initialize the Environment on Master and Node Machines

1. Disable the firewall

systemctl stop firewalld && systemctl disable firewalld

2. Disable SELinux

sed -i "s/^SELINUX=enforcing/#SELINUX=enforcing\nSELINUX=disabled/" /etc/selinux/config
setenforce 0
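
Verify the change:

getenforce   # Permissive now (after setenforce 0), Disabled after the next reboot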

3. Configure NTP time synchronization

yum -y install ntpdate
# add an hourly sync job (edit the crontab with `crontab -e`):
0 */1 * * * ntpdate time1.aliyun.com

Point this at an internal or public NTP server depending on your environment. Note that ntp/ntpdate was dropped from the CentOS 8 base repositories in favor of chrony; if the install fails, use chronyd instead.
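
To confirm the time server is reachable, run one synchronization by hand:

ntpdate time1.aliyun.com
date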

4. Disable the swap partition

Check memory usage:
[root@worker1 ~]# free -m
              total        used        free      shared  buff/cache   available
Mem:            972         266         106           7         599         547
Swap:          2047           0        2047

Turn off all active swap:

swapoff -a

Comment out the swap entry in /etc/fstab so it stays off after a reboot:

[root@worker1 ~]# cat /etc/fstab

#
# /etc/fstab
# Created by anaconda on Mon Sep  6 17:53:09 2021
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/centos-root /                       xfs     defaults        0 0
UUID=72a5cca7-a068-44e3-a13b-c1851d557f69 /boot                   xfs     defaults        0 0
#/dev/mapper/centos-swap swap                    swap    defaults        0 0
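
Verify that swap is fully off:

swapon --show   # no output means no active swap
free -m         # the Swap line should read 0 0 0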

5. Install Docker and Docker Compose

Configure the Docker yum repository:

yum install -y yum-utils
#yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

Install and start Docker:

sudo yum install docker-ce
sudo systemctl start docker
sudo systemctl enable docker

Configure the Docker daemon, then restart it to apply the changes:

[root@worker2 ~]# cat /etc/docker/daemon.json
{
"insecure-registries":["192.168.142.21:9098"],
"registry-mirrors": ["http://hub-mirror.c.163.com", "https://docker.mirrors.ustc.edu.cn"],
"exec-opts":["native.cgroupdriver=systemd"]
}
[root@worker2 ~]# systemctl restart docker

Without the exec-opts setting, kubeadm warns: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd".
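
Verify the installation and that the systemd cgroup driver from daemon.json took effect:

docker version
docker info | grep -i cgroup   # expect "Cgroup Driver: systemd"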

Configure the pip mirror

[root@master ~]# cat .pip/pip.conf 
## Note, this file is written by cloud-init on first boot of an instance
## modifications made here will not survive a re-bundle.
###
[global]
index-url=http://mirrors.cloud.aliyuncs.com/pypi/simple/

[install]
trusted-host=mirrors.cloud.aliyuncs.com

If ~/.pip/pip.conf does not exist, create it yourself.

Install Docker Compose

yum -y install python3-pip   # pip3 is not installed by default
pip3 install docker-compose
# On CentOS 8 the default Python is Python 3
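
Verify:

docker-compose version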

6. Kernel tuning (enable bridge netfilter)

[root@worker1 ~]# cat /etc/sysctl.d/kubernetes.conf
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
# do not use swap unless the system is OOM
vm.swappiness=0
# do not check whether enough physical memory is available
vm.overcommit_memory=1
# do not panic on OOM; let the OOM killer handle it
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720

Load the settings (the bridge settings require the br_netfilter module):

modprobe br_netfilter
sysctl -p /etc/sysctl.d/kubernetes.conf

Also add a kubelet argument so the kubelet does not refuse to start over swap:

cat /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--fail-swap-on=false"

7. Enable IPVS (optional)

yum -y install ipset ipvsadm
[root@worker1 ~]# cat /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
#modprobe -- nf_conntrack
modprobe -- nf_conntrack_ipv4
# on kernels 4.19+, nf_conntrack_ipv4 is merged into nf_conntrack;
# if this modprobe fails, use the commented nf_conntrack line above instead
Make the script executable, run it, and check that the modules are loaded:
[root@worker1 ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
ip_vs_sh               12688  0
ip_vs_wrr              12697  0
ip_vs_rr               12600  0
ip_vs                 145458  6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack_ipv4      15053  2
nf_defrag_ipv4         12729  1 nf_conntrack_ipv4
nf_conntrack          139264  7 ip_vs,nf_nat,nf_nat_ipv4,xt_conntrack,nf_nat_masquerade_ipv4,nf_conntrack_netlink,nf_conntrack_ipv4
libcrc32c              12644  4 xfs,ip_vs,nf_nat,nf_conntrack
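
Note: on systemd-based releases the /etc/sysconfig/modules/ scripts are not always run at boot. A sketch of the systemd-native way to make the modules persistent:

cat > /etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF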

8. Install kubelet, kubeadm, and kubectl

[root@worker1 ~]# cat /etc/yum.repos.d/kubernetes1.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg

Install kubelet, kubeadm, and kubectl, then confirm that cri-tools and kubernetes-cni were pulled in as dependencies.

yum install -y kubelet kubeadm kubectl
To keep the kubelet's cgroup driver consistent with Docker's (both systemd), it is recommended to modify the following file.

[root@worker1 ~]# cat /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"
# note: KUBELET_EXTRA_ARGS is a single variable, so this overwrites the
# --fail-swap-on=false flag from step 6; to keep both, put both flags on one line
systemctl enable kubelet && systemctl start kubelet
rpm -qa|grep cri-tools
rpm -qa|grep kubernetes-cni

Check that docker and kubelet are enabled at boot:

systemctl list-unit-files|grep docker
systemctl list-unit-files|grep kubelet

III. Operations on the Master

1. List the images required on the master

[root@master k8s]# kubeadm config images list
k8s.gcr.io/kube-apiserver:v1.21.0
k8s.gcr.io/kube-controller-manager:v1.21.0
k8s.gcr.io/kube-scheduler:v1.21.0
k8s.gcr.io/kube-proxy:v1.21.0
k8s.gcr.io/pause:3.4.1
k8s.gcr.io/etcd:3.4.13-0
k8s.gcr.io/coredns/coredns:v1.8.0
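
By default this lists images for the release the installed kubeadm was built for; to pin the list to a specific release, pass the version explicitly:

kubeadm config images list --kubernetes-version v1.21.0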

2. Image pull script

[root@worker1 ~]# cat pull-images.sh
#!/bin/bash
# pull the v1.21.0 images listed above from the Aliyun mirror,
# retag them as k8s.gcr.io, then remove the mirror tags
images=(
        kube-apiserver:v1.21.0
        kube-controller-manager:v1.21.0
        kube-scheduler:v1.21.0
        kube-proxy:v1.21.0
        pause:3.4.1
        etcd:3.4.13-0
        coredns/coredns:v1.8.0
)
for imageName in "${images[@]}";
do
    docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/${imageName}
    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/${imageName} k8s.gcr.io/${imageName}
    docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/${imageName}
done

Fix-up for images that fail to pull. The Aliyun mirror does not carry the nested coredns/coredns path, so one workaround is to pull coredns from Docker Hub and retag it:

docker pull coredns/coredns:1.8.0
docker tag coredns/coredns:1.8.0 k8s.gcr.io/coredns/coredns:v1.8.0
docker rmi coredns/coredns:1.8.0
docker images

Run the pull script:

sh pull-images.sh

3. Generate the default init configuration

kubeadm config print init-defaults > kubeadm-config.yaml
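
If you also want the kube-proxy defaults (useful before the IPVS change in the next step), kubeadm can print the component configs too:

kubeadm config print init-defaults --component-configs KubeProxyConfiguration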

4. Edit the configuration file

apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 10.199.232.99 # change to this master's IP
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: node
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
kubernetesVersion: v1.21.0
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"  # add this line
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
# append the following document at the end to switch kube-proxy to IPVS mode
# (the SupportIPVSProxyMode feature gate was removed in Kubernetes 1.20+,
# so only `mode: ipvs` is needed here)
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs

5. Initialize the cluster

kubeadm init --config=kubeadm-config.yaml --upload-certs |tee kubeadm-init.log

Prepare the kubeconfig used to manage the cluster:

[root@master1 ~]# mkdir .kube 
[root@master1 ~]# cp /etc/kubernetes/admin.conf ./.kube/config 
[root@master1 ~]# chown $(id -u):$(id -g) .kube/config
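
kubectl should now be able to talk to the API server; the master will show NotReady until the network plugin from the next step is installed:

kubectl get nodes
kubectl cluster-info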

6. Install the flannel network plugin

kubectl apply -f kube-flannel.yml
[root@master install-k8s]# cat kube-flannel.yml
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
  - configMap
  - secret
  - emptyDir
  - hostPath
  allowedHostPaths:
  - pathPrefix: "/etc/cni/net.d"
  - pathPrefix: "/etc/kube-flannel"
  - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups: ['extensions']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.14.0-rc1
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.14.0-rc1
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg

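To confirm flannel is running, watch the flannel pods (labelled app=flannel in the manifest above) and the node state:

kubectl -n kube-system get pods -l app=flannel -o wide
kubectl get nodes   # nodes turn Ready once flannel is up
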
7. Review the generated kubeadm-init log

[root@master install-k8s]# cat kubeadm-init.log 
[init] Using Kubernetes version: v1.21.0
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local node] and IPs [10.96.0.1 10.199.232.99]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [localhost node] and IPs [10.199.232.99 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [localhost node] and IPs [10.199.232.99 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[kubelet-check] Initial timeout of 40s passed.
[apiclient] All control plane components are healthy after 67.502698 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.21" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
2135108638acb91eec76df20d2688fc17dc00c590fb7f4783b0abadbef5ef42b
[mark-control-plane] Marking the node node as control-plane by adding the labels: [node-role.kubernetes.io/master(deprecated) node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node node as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: abcdef.0123456789abcdef
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 10.199.232.99:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:b73d642a2deb12fcf40adf1f1eec1fad72ff9d892be78cd6854c456f11e1eae2 

Make sure the kubectl setup commands printed in the log above (mkdir / cp / chown) have been run, as already done in step 5.
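
The bootstrap token above (ttl: 24h0m0s) expires after a day; to join a node later, generate a fresh join command on the master:

kubeadm token create --print-join-command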

IV. Operations on the Nodes

1. Pull the images the node needs (which images a worker requires follows from how a k8s cluster runs: workers only run kube-proxy and the pause container):

kube-proxy:v1.21.0
pause:3.4.1

[root@worker02 ~]# cat pull-images.sh
#!/bin/bash
images=(
	kube-proxy:v1.21.0
	pause:3.4.1
)
for imageName in "${images[@]}";
do
    docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/${imageName}
    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/${imageName} k8s.gcr.io/${imageName}
    docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/${imageName}
done

[root@worker02 ~]# sh pull-images.sh

2. The flannel network plugin

No kubectl command is needed on the node: the flannel DaemonSet applied on the master in step III.6 automatically schedules a flannel pod onto every node that joins. Optionally, pre-pull the image to speed this up:

docker pull quay.io/coreos/flannel:v0.14.0-rc1

3. Join the node to the cluster

If the node previously belonged to another cluster, reset it first:

kubeadm reset

Then run the join command printed by kubeadm init:

kubeadm join 10.199.232.99:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:b73d642a2deb12fcf40adf1f1eec1fad72ff9d892be78cd6854c456f11e1eae2
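
On the node itself, a successful join shows up as a running kubelet plus the kube-proxy and flannel containers:

systemctl status kubelet
docker ps | grep -e kube-proxy -e flannel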

4. Check cluster status

List the pods in all namespaces:

kubectl get pod --all-namespaces

Check the nodes (on the master):

kubectl get nodes
NAME       STATUS   ROLES                  AGE    VERSION
node       Ready    control-plane,master   2d2h   v1.21.0
worker01   Ready    <none>                 2d     v1.21.0
worker02   Ready    <none>                 2d     v1.21.0
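
Since kube-proxy runs in IPVS mode, the service rules can be inspected on any node with the ipvsadm tool installed in step II.7:

ipvsadm -Ln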
