I. Cluster Information
Hostname     Internal IP   Role
k8s-master   10.0.4.13     control plane (master)
k8s-slave1   10.0.4.9      worker
k8s-slave2   10.0.12.5     worker

OS: CentOS 7; container runtime: docker-ce; Kubernetes v1.22.2 deployed with kubeadm; pod network: flannel (10.244.0.0/16).
II. Pre-installation Preparation
1. Set the hostname (on all three nodes)

k8s-master:
$ hostnamectl set-hostname k8s-master && bash
k8s-slave1:
$ hostnamectl set-hostname k8s-slave1 && bash
k8s-slave2:
$ hostnamectl set-hostname k8s-slave2 && bash
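
A quick optional check that the new hostname took effect on each node:

$ hostnamectl status | grep "Static hostname"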

2. Configure /etc/hosts name resolution (on all three nodes)

[root@k8s-master ~]# vim /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
10.0.4.13 k8s-master
10.0.4.9 k8s-slave1
10.0.12.5 k8s-slave2
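
A small sketch to confirm that all three names resolve and the nodes can reach each other (assumes ICMP is allowed between them):

$ for h in k8s-master k8s-slave1 k8s-slave2; do ping -c 1 -W 1 $h >/dev/null && echo "$h ok" || echo "$h unreachable"; done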

3. Adjust system parameters (on all three nodes)
1. Disable swap

$ swapoff -a
$ sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
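
Both of the following should then report no active swap:

$ free -h | grep -i swap
$ swapon -s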

2. Disable SELinux and the firewall

$ sed -ri 's#(SELINUX=).*#\1disabled#' /etc/selinux/config 
$ setenforce 0 
$ systemctl disable firewalld && systemctl stop firewalld
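
Optional verification:

$ getenforce                      # Permissive now, Disabled after the next reboot
$ systemctl is-active firewalld   # should print inactive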

3. Modify kernel parameters

$ vi /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1 
net.bridge.bridge-nf-call-iptables = 1 
net.ipv4.ip_forward=1 
vm.max_map_count=262144

4. Apply the kernel parameters

$ modprobe br_netfilter && sysctl -p /etc/sysctl.d/k8s.conf
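
To verify, and optionally make br_netfilter load again after a reboot (the file name k8s.conf below is just a convention):

$ lsmod | grep br_netfilter
$ sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward
$ echo br_netfilter > /etc/modules-load.d/k8s.conf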

5. Configure the yum repositories (first delete all existing /etc/yum.repos.d/*.repo files)

$ curl -o /etc/yum.repos.d/Centos-7.repo http://mirrors.aliyun.com/repo/Centos-7.repo
$ curl -o /etc/yum.repos.d/docker-ce.repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
$ cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
        http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
$ yum clean all && yum makecache
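
The new repositories should now show up in the repo list:

$ yum repolist | grep -Ei "kubernetes|docker-ce"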

III. Install docker-ce (on all three nodes)
1. Install docker-ce (Community Edition); to pin a specific version, see the sketch after the install command

$ yum install docker-ce-cli docker-ce
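
The command above installs whatever version is newest in the repo. If you want to pin a specific version, a sketch (the version string 20.10.9 is only an example; pick one from the list output):

$ yum list docker-ce --showduplicates | sort -r
$ yum install -y docker-ce-20.10.9 docker-ce-cli-20.10.9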

2. Enable and start docker

$ systemctl enable docker && systemctl start docker

IV. Deploy the Kubernetes Cluster
1. Install kubeadm, kubelet, and kubectl (on all three nodes)

$ yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
$ systemctl enable kubelet
$ systemctl start kubelet
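
The kubernetes repo is not version-pinned, so the command above installs the newest packages available; to install exactly the versions used in this article (v1.22.2), a sketch:

$ yum install -y kubelet-1.22.2 kubeadm-1.22.2 kubectl-1.22.2 --disableexcludes=kubernetes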

kubelet fails right after it is started. This is expected at this point: until kubeadm init (or kubeadm join) has generated /var/lib/kubelet/config.yaml, kubelet exits immediately and systemd keeps restarting it:

systemctl status kubelet
● kubelet.service - kubelet: The Kubernetes Node Agent
   Loaded: loaded (/usr/lib/systemd/system/kubelet.service; enabled; vendor preset: disabled)
  Drop-In: /usr/lib/systemd/system/kubelet.service.d
           └─10-kubeadm.conf
   Active: activating (auto-restart) (Result: exit-code) since Thu 2021-10-21 15:45:17 CST; 4s ago
     Docs: https://kubernetes.io/docs/
  Process: 25663 ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS (code=exited, status=1/FAILURE)
 Main PID: 25663 (code=exited, status=1/FAILURE)

Oct 21 15:45:17 k8s-master systemd[1]: kubelet.service: main process exited, code=exited, status=1/FAILURE
Oct 21 15:45:17 k8s-master systemd[1]: Unit kubelet.service entered failed state.
Oct 21 15:45:17 k8s-master systemd[1]: kubelet.service failed.

Note: after the installation you can verify it with the following command:

$ kubeadm version
kubeadm version: &version.Info{Major:"1", Minor:"22", GitVersion:"v1.22.2", GitCommit:"8b5a19147530eaac9476b0ab82980b4088bbc1b2", GitTreeState:"clean", BuildDate:"2021-09-15T21:37:34Z", GoVersion:"go1.16.8", Compiler:"gc", Platform:"linux/amd64"}

2. Generate the initialization configuration file (on the master node)

$ kubeadm config print init-defaults > kubeadm.yaml
$ cat kubeadm.yaml
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 10.0.4.13     # apiserver address; single-master setup, so use the master node's internal IP
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers   # changed to the Aliyun mirror
kind: ClusterConfiguration
kubernetesVersion: v1.22.0
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16                  # subnet for the pod network
  serviceSubnet: 10.96.0.0/12
scheduler: {}

3. Initialize the master node using the configuration file
$ kubeadm init --config kubeadm.yaml
# Run the following commands only after the init above has completed successfully:
$ mkdir -p $HOME/.kube
$ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
$ sudo chown $(id -u):$(id -g) $HOME/.kube/config

If the images cannot be pulled, init fails with the error below (this does not happen if imageRepository has been switched to the Aliyun mirror as above):

[root@k8s-master ~]# kubeadm init --config kubeadm.yaml
[init] Using Kubernetes version: v1.22.0
[preflight] Running pre-flight checks
	[WARNING Hostname]: hostname "node" could not be reached
	[WARNING Hostname]: hostname "node": lookup node on 183.60.83.19:53: no such host
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
error execution phase preflight: [preflight] Some fatal errors occurred:
	[ERROR ImagePull]: failed to pull image k8s.gcr.io/kube-apiserver:v1.22.0: output: Error response from daemon: Get https://k8s.gcr.io/v2/: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
, error: exit status 1
	[ERROR ImagePull]: failed to pull image k8s.gcr.io/kube-controller-manager:v1.22.0: output: Error response from daemon: Get https://k8s.gcr.io/v2/: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
, error: exit status 1
	[ERROR ImagePull]: failed to pull image k8s.gcr.io/kube-scheduler:v1.22.0: output: Error response from daemon: Get https://k8s.gcr.io/v2/: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
, error: exit status 1
	[ERROR ImagePull]: failed to pull image k8s.gcr.io/kube-proxy:v1.22.0: output: Error response from daemon: Get https://k8s.gcr.io/v2/: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
, error: exit status 1
	[ERROR ImagePull]: failed to pull image k8s.gcr.io/pause:3.5: output: Error response from daemon: Get https://k8s.gcr.io/v2/: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
, error: exit status 1
	[ERROR ImagePull]: failed to pull image k8s.gcr.io/etcd:3.5.0-0: output: Error response from daemon: Get https://k8s.gcr.io/v2/: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
, error: exit status 1
	[ERROR ImagePull]: failed to pull image k8s.gcr.io/coredns/coredns:v1.8.4: output: Error response from daemon: Get https://k8s.gcr.io/v2/: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
, error: exit status 1
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
To see the stack trace of this error execute with --v=5 or higher

Workaround 1: pull the required images from the Aliyun mirror one by one and re-tag them as k8s.gcr.io:

[root@k8s-master ~]# kubeadm config images list
k8s.gcr.io/kube-apiserver:v1.22.2
k8s.gcr.io/kube-controller-manager:v1.22.2
k8s.gcr.io/kube-scheduler:v1.22.2
k8s.gcr.io/kube-proxy:v1.22.2
k8s.gcr.io/pause:3.5
k8s.gcr.io/etcd:3.5.0-0
k8s.gcr.io/coredns/coredns:v1.8.4
[root@k8s-master ~]# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.22.2
v1.22.2: Pulling from google_containers/kube-apiserver
b49b96595fd4: Pull complete 
8342ce73f773: Pull complete 
a09b3a83ae84: Pull complete 
Digest: sha256:eb4fae890583e8d4449c1e18b097aec5574c25c8f0323369a2df871ffa146f41
Status: Downloaded newer image for registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.22.2
[root@k8s-master ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.22.2 k8s.gcr.io/kube-apiserver:v1.22.2
[root@k8s-master ~]# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.22.2
v1.22.2: Pulling from google_containers/kube-controller-manager
b49b96595fd4: Already exists 
8342ce73f773: Already exists 
d54253f5b912: Pull complete 
Digest: sha256:91ccb477199cdb4c63fb0c8fcc39517a186505daf4ed52229904e6f9d09fd6f9
Status: Downloaded newer image for registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.22.2
[root@k8s-master ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.22.2 k8s.gcr.io/kube-controller-manager:v1.22.2
[root@k8s-master ~]# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.22.2
v1.22.2: Pulling from google_containers/kube-scheduler
b49b96595fd4: Already exists 
8342ce73f773: Already exists 
87c424611632: Pull complete 
Digest: sha256:c76cb73debd5e37fe7ad42cea9a67e0bfdd51dd56be7b90bdc50dd1bc03c018b
Status: Downloaded newer image for registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.22.2
[root@k8s-master ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.22.2 k8s.gcr.io/kube-scheduler:v1.22.2
[root@k8s-master ~]# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.22.2
v1.22.2: Pulling from google_containers/kube-proxy
fe5d561940df: Pull complete 
4c7972451415: Pull complete 
Digest: sha256:561d6cb95c32333db13ea847396167e903d97cf6e08dd937906c3dd0108580b7
Status: Downloaded newer image for registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.22.2
[root@k8s-master ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.22.2 k8s.gcr.io/kube-proxy:v1.22.2
[root@k8s-master ~]# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.5
3.5: Pulling from google_containers/pause
019d8da33d91: Pull complete 
Digest: sha256:1ff6c18fbef2045af6b9c16bf034cc421a29027b800e4f9b68ae9b1cb3e9ae07
Status: Downloaded newer image for registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.5
[root@k8s-master ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.5 k8s.gcr.io/pause:3.5
[root@k8s-master ~]# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.0-0
3.5.0-0: Pulling from google_containers/etcd
5dea5ec2316d: Pull complete 
f19a5990e757: Pull complete 
ea2d08d27074: Pull complete 
677a5c6b24cf: Pull complete 
12201bf2f59f: Pull complete 
Digest: sha256:9ce33ba33d8e738a5b85ed50b5080ac746deceed4a7496c550927a7a19ca3b6d
Status: Downloaded newer image for registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.0-0
[root@k8s-master ~]# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.0-0
3.5.0-0: Pulling from google_containers/etcd
Digest: sha256:9ce33ba33d8e738a5b85ed50b5080ac746deceed4a7496c550927a7a19ca3b6d
Status: Image is up to date for registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.0-0
[root@k8s-master ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.0-0 k8s.gcr.io/etcd:3.5.0-0
[root@k8s-master ~]# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns/coredns:v1.8.4
Error response from daemon: pull access denied for registry.cn-hangzhou.aliyuncs.com/google_containers/coredns/coredns, repository does not exist or may require 'docker login'
[root@k8s-master ~]# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.8.4
v1.8.4: Pulling from google_containers/coredns
c6568d217a00: Pull complete 
bc38a22c706b: Pull complete 
Digest: sha256:6e5a02c21641597998b4be7cb5eb1e7b02c0d8d23cce4dd09f4682d463798890
Status: Downloaded newer image for registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.8.4
[root@k8s-master ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.8.4 k8s.gcr.io/coredns/coredns:v1.8.4

Workaround 2: do the same pull-and-retag with a small script:

[root@k8s-master ~]# cat pull-images.sh 
#!/bin/bash
# Pull the images required by kubeadm init (v1.22.0, per the error above) from the
# Aliyun mirror and re-tag them with the k8s.gcr.io names kubeadm expects.
for i in kube-apiserver:v1.22.0 kube-controller-manager:v1.22.0 kube-scheduler:v1.22.0 kube-proxy:v1.22.0 pause:3.5 etcd:3.5.0-0 coredns:v1.8.4
do
   docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/$i
   if [ $i == coredns:v1.8.4 ];then
       # coredns sits under an extra path segment in k8s.gcr.io (k8s.gcr.io/coredns/coredns)
       docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/$i k8s.gcr.io/coredns/$i
   else
       docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/$i k8s.gcr.io/$i
   fi
done
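
Usage of the script, plus a quick check that every required k8s.gcr.io image is now present locally:

$ bash pull-images.sh
$ docker images | grep k8s.gcr.io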

kubelet may also show the following error (kubelet and Docker are using different cgroup drivers):

Oct 21 14:03:51 k8s-master kubelet[29761]: Flag --network-plugin has been deprecated, will be removed along with dockershim.
Oct 21 14:03:51 k8s-master kubelet[29761]: Flag --network-plugin has been deprecated, will be removed along with dockershim.
Oct 21 14:03:51 k8s-master kubelet[29761]: I1021 14:03:51.161731   29761 server.go:440] "Kubelet version" kubeletVersion="v1.22.2"
Oct 21 14:03:51 k8s-master kubelet[29761]: I1021 14:03:51.162050   29761 server.go:868] "Client rotation is on, will bootstrap in background"
Oct 21 14:03:51 k8s-master kubelet[29761]: I1021 14:03:51.164473   29761 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Oct 21 14:03:51 k8s-master kubelet[29761]: I1021 14:03:51.167472   29761 dynamic_cafile_content.go:155] "Starting controller" name="client-ca-bundle::/etc/kubernetes/pki/ca.crt"
Oct 21 14:03:51 k8s-master kubelet[29761]: I1021 14:03:51.200271   29761 server.go:687] "--cgroups-per-qos enabled, but --cgroup-root was not specified.  defaulting to /"
Oct 21 14:03:51 k8s-master kubelet[29761]: I1021 14:03:51.200531   29761 container_manager_linux.go:280] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Oct 21 14:03:51 k8s-master kubelet[29761]: I1021 14:03:51.200655   29761 container_manager_linux.go:285] "Creating Container Manager object based on Node Config" nodeConfig={RuntimeCgroupsName: SystemCgroupsName: KubeletCgroupsName: ContainerRuntime:docker CgroupsPerQOS
Oct 21 14:03:51 k8s-master kubelet[29761]: I1021 14:03:51.200684   29761 topology_manager.go:133] "Creating topology manager with policy per scope" topologyPolicyName="none" topologyScopeName="container"
Oct 21 14:03:51 k8s-master kubelet[29761]: I1021 14:03:51.200696   29761 container_manager_linux.go:320] "Creating device plugin manager" devicePluginEnabled=true
Oct 21 14:03:51 k8s-master kubelet[29761]: I1021 14:03:51.200748   29761 state_mem.go:36] "Initialized new in-memory state store"
Oct 21 14:03:51 k8s-master kubelet[29761]: I1021 14:03:51.200807   29761 kubelet.go:314] "Using dockershim is deprecated, please consider using a full-fledged CRI implementation"
Oct 21 14:03:51 k8s-master kubelet[29761]: I1021 14:03:51.200837   29761 client.go:78] "Connecting to docker on the dockerEndpoint" endpoint="unix:///var/run/docker.sock"
Oct 21 14:03:51 k8s-master kubelet[29761]: I1021 14:03:51.200854   29761 client.go:97] "Start docker client with request timeout" timeout="2m0s"
Oct 21 14:03:51 k8s-master kubelet[29761]: I1021 14:03:51.204638   29761 docker_service.go:566] "Hairpin mode is set but kubenet is not enabled, falling back to HairpinVeth" hairpinMode=promiscuous-bridge
Oct 21 14:03:51 k8s-master kubelet[29761]: I1021 14:03:51.204663   29761 docker_service.go:242] "Hairpin mode is set" hairpinMode=hairpin-veth
Oct 21 14:03:51 k8s-master kubelet[29761]: I1021 14:03:51.204835   29761 cni.go:239] "Unable to update cni config" err="no networks found in /etc/cni/net.d"
Oct 21 14:03:51 k8s-master kubelet[29761]: I1021 14:03:51.209357   29761 cni.go:239] "Unable to update cni config" err="no networks found in /etc/cni/net.d"
Oct 21 14:03:51 k8s-master kubelet[29761]: I1021 14:03:51.209430   29761 docker_service.go:257] "Docker cri networking managed by the network plugin" networkPluginName="cni"
Oct 21 14:03:51 k8s-master kubelet[29761]: I1021 14:03:51.209527   29761 cni.go:239] "Unable to update cni config" err="no networks found in /etc/cni/net.d"
Oct 21 14:03:51 k8s-master kubelet[29761]: I1021 14:03:51.220480   29761 docker_service.go:264] "Docker Info" dockerInfo=&{ID:S4XU:NP7B:ZJBA:AGGV:LFDN:SN7D:WL4U:LVEU:JPZQ:26LW:AMY7:LBAN Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:11 Dr
Oct 21 14:03:51 k8s-master kubelet[29761]: E1021 14:03:51.220534   29761 server.go:294] "Failed to run kubelet" err="failed to run Kubelet: misconfiguration: kubelet cgroup driver: \"systemd\" is different from docker cgroup driver: \"cgroupfs\""
Oct 21 14:03:51 k8s-master systemd[1]: kubelet.service: main process exited, code=exited, status=1/FAILURE
Oct 21 14:03:51 k8s-master systemd[1]: Unit kubelet.service entered failed state.
Oct 21 14:03:51 k8s-master systemd[1]: kubelet.service failed.

Fix: switch Docker to the systemd cgroup driver so it matches the kubelet, then restart both services:

[root@k8s-slave2 ~]# cat /etc/docker/daemon.json 
{
"exec-opts": ["native.cgroupdriver=systemd"]
}
[root@k8s-slave2 ~]# systemctl restart docker
[root@k8s-slave2 ~]# systemctl restart kubelet.service
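
Confirm the change took effect:

$ docker info | grep -i "cgroup driver"    # expected: Cgroup Driver: systemd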

Reset and re-run the init:

[root@k8s-master ~]# kubeadm reset
[root@k8s-master ~]# kubeadm init --config kubeadm.yaml 
[init] Using Kubernetes version: v1.22.0
[preflight] Running pre-flight checks
	[WARNING Hostname]: hostname "node" could not be reached
	[WARNING Hostname]: hostname "node": lookup node on 183.60.83.19:53: no such host
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local node] and IPs [10.96.0.1 10.0.4.13]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [localhost node] and IPs [10.0.4.13 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [localhost node] and IPs [10.0.4.13 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 8.502760 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.22" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node node as control-plane by adding the labels: [node-role.kubernetes.io/master(deprecated) node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node node as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: abcdef.0123456789abcdef
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 10.0.4.13:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:474bb8d19c670af97fa90bda7c4239eb7de6470acba1c834c1a57a703fd6442a

Remember to run those last lines from the output above (the first three as a regular user, or just the export if you are root):

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config
  export KUBECONFIG=/etc/kubernetes/admin.conf

Run the join command printed by kubeadm init on the slave nodes:

$ kubeadm join 10.0.4.13:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:474bb8d19c670af97fa90bda7c4239eb7de6470acba1c834c1a57a703fd6442a

[root@k8s-slave2 ~]# kubeadm join 10.0.4.13:6443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:474bb8d19c670af97fa90bda7c4239eb7de6470acba1c834c1a57a703fd6442a
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
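
Note that the bootstrap token (ttl: 24h0m0s in kubeadm.yaml) expires after 24 hours; if a node needs to join later than that, generate a fresh join command on the master:

$ kubeadm token create --print-join-command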

Check the node status (nodes remain NotReady until a network plugin is installed):

[root@k8s-master ~]# kubectl get nodes
NAME         STATUS     ROLES                  AGE   VERSION
k8s-slave1   NotReady   <none>                 22m   v1.22.2
k8s-slave2   NotReady   <none>                 38s   v1.22.2
k8s-master   NotReady   control-plane,master   26m   v1.22.2

V. Install the flannel Plugin (master node only)

$ wget https://raw.githubusercontent.com/coreos/flannel/2140ac876ef134e0ed5af15c65e414cf26827915/Documentation/kube-flannel.yml
$ vi kube-flannel.yml

# Edit the configuration: specify the NIC name for flanneld (around line 190 of the file, add one argument line),
# and make sure the pod network address matches the subnet specified when initializing the master.
# Both edits are sketched below, followed by applying the manifest.
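
A minimal sketch of the two edits, assuming the nodes' internal IPs sit on eth0 (adjust the interface name to your environment):

      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.15.0
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        - --iface=eth0        # added line: bind flannel to the node's internal NIC (assumed name)

  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }

The Network value must equal podSubnet from kubeadm.yaml (10.244.0.0/16). After saving the file, apply it and wait for the nodes to become Ready:

$ kubectl apply -f kube-flannel.yml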

[root@k8s-master ~]# kubectl get nodes
NAME         STATUS   ROLES                  AGE     VERSION
k8s-slave1   Ready    <none>                 30m     v1.22.2
k8s-slave2   Ready    <none>                 9m25s   v1.22.2
k8s-master   Ready    control-plane,master   35m     v1.22.2
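
To confirm the flannel DaemonSet is running on every node:

$ kubectl get pods -n kube-system -o wide | grep flannel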

The latest kube-flannel configuration file, for reference:

---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
  - configMap
  - secret
  - emptyDir
  - hostPath
  allowedHostPaths:
  - pathPrefix: "/etc/cni/net.d"
  - pathPrefix: "/etc/kube-flannel"
  - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups: ['extensions']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni-plugin
        image: rancher/mirrored-flannelcni-flannel-cni-plugin:v1.2
        command:
        - cp
        args:
        - -f
        - /flannel
        - /opt/cni/bin/flannel
        volumeMounts:
        - name: cni-plugin
          mountPath: /opt/cni/bin
      - name: install-cni
        image: quay.io/coreos/flannel:v0.15.0
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.15.0
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni-plugin
        hostPath:
          path: /opt/cni/bin
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
