I. Environment

IP            Role
192.168.0.30  VIP
192.168.0.31  master1 + etcd
192.168.0.32  master2 + etcd
192.168.0.33  master3 + etcd
192.168.0.35  node1
192.168.0.37  node2
192.168.0.38  node3

Base environment setup on all nodes

yum install bash-completion vim wget lrzsz unzip net-tools -y
#Add hosts entries;
cat >/etc/hosts<<EOF
127.0.0.1 localhost localhost.localdomain
192.168.0.31 master1
192.168.0.32 master2
192.168.0.33 master3
192.168.0.35 node1
192.168.0.37 node2
192.168.0.38 node3
EOF
#Disable SELinux (in the config file, and immediately via setenforce) and the firewall;
sed -i '/SELINUX/s/enforcing/disabled/g'  /etc/sysconfig/selinux
setenforce  0
systemctl   stop     firewalld.service
systemctl   disable   firewalld.service
#Sync node time;
yum install ntpdate -y
ntpdate  pool.ntp.org
rm -f /etc/localtime
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
/usr/sbin/ntpdate ntp1.aliyun.com
(echo "*/10 * * * * /usr/sbin/ntpdate asia.pool.ntp.org";crontab -l)|crontab
#Set each node's hostname from its /etc/hosts entry;
hostname `cat /etc/hosts|grep $(ifconfig|grep broadcast|awk '{print $2}'|grep 192)|awk '{print $2}'`;su
systemctl stop postfix
systemctl disable postfix
crontab -l
swapoff -a 
sed -i '/swap/d' /etc/fstab
#The bridge-nf sysctls below require the br_netfilter module;
modprobe br_netfilter
cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl -p /etc/sysctl.d/k8s.conf
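As a quick optional check, confirm the module loaded and the parameters apply:

lsmod | grep br_netfilter
sysctl net.bridge.bridge-nf-call-iptables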

II. Deploy the etcd cluster (on 192.168.0.31/32/33)

1. Install cfssl on the etcd cluster nodes:
wget -c https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
chmod +x cfssl_linux-amd64
mv cfssl_linux-amd64 /usr/local/bin/cfssl
 
wget -c https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
chmod +x cfssljson_linux-amd64
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson

wget -c https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl-certinfo_linux-amd64
mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
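To confirm the tools are installed and executable (optional check):

cfssl version
ls -l /usr/local/bin/cfssl*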
2. Generate certificates on 192.168.0.31

mkdir cert && cd cert
Then create the CSR and CA config files:

cat >  ca-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "hubei",
      "L": "hubei",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
#Use a long certificate validity here: 87600h = 10 years.

cat >  ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "87600h"
      }
    }
  }
}
EOF
cat > etcd-csr.json <<EOF
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.0.31",
    "192.168.0.32",
    "192.168.0.33"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "hubei",
      "L": "hubei",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

Generate the CA, then sign the etcd certificate with it:

cfssl gencert -initca ca-csr.json | cfssljson -bare ca

cfssl gencert -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
mkdir -p /etc/kubernetes/pki/etcd/ssl

cp ca.pem /etc/kubernetes/pki/etcd/ssl/ca.pem
cp etcd.pem /etc/kubernetes/pki/etcd/ssl/apiserver-etcd-client.pem
cp etcd-key.pem /etc/kubernetes/pki/etcd/ssl/apiserver-etcd-client-key.pem
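Optionally inspect the signed certificate with the cfssl-certinfo tool installed earlier; the sans list should contain 127.0.0.1 and 192.168.0.31/32/33, and not_after should be roughly ten years out:

cfssl-certinfo -cert etcd.pem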

Copy to the other cluster nodes (scp -r recreates the directory tree on the targets):
scp -r /etc/kubernetes master2:/etc/
scp -r /etc/kubernetes master3:/etc/

mkdir -p /data/k8scluster_packages && cd /data/k8scluster_packages
wget -c https://github.com/etcd-io/etcd/releases/download/v3.4.13/etcd-v3.4.13-linux-amd64.tar.gz
tar xvf etcd-v3.4.13-linux-amd64.tar.gz
cd etcd-v3.4.13-linux-amd64
cp -a etcd  etcdctl /usr/local/bin/

Copy the binaries to the other cluster nodes:
scp  -P 22 -r etcd etcdctl master2:/usr/local/bin/
scp  -P 22 -r etcd etcdctl master3:/usr/local/bin/
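A quick version check on each node:

etcd --version
etcdctl version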
3. Create the etcd systemd service

On 192.168.0.31 (master1):

cat >  /etc/systemd/system/etcd.service <<EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos
 
[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/local/bin/etcd \
    --name=master1 \
    --cert-file=/etc/kubernetes/pki/etcd/ssl/apiserver-etcd-client.pem \
    --peer-cert-file=/etc/kubernetes/pki/etcd/ssl/apiserver-etcd-client.pem \
    --key-file=/etc/kubernetes/pki/etcd/ssl/apiserver-etcd-client-key.pem \
    --peer-key-file=/etc/kubernetes/pki/etcd/ssl/apiserver-etcd-client-key.pem \
    --trusted-ca-file=/etc/kubernetes/pki/etcd/ssl/ca.pem \
    --peer-trusted-ca-file=/etc/kubernetes/pki/etcd/ssl/ca.pem \
    --initial-advertise-peer-urls=https://192.168.0.31:2380 \
    --listen-peer-urls=https://192.168.0.31:2380 \
    --listen-client-urls=https://192.168.0.31:2379,http://127.0.0.1:2379 \
    --advertise-client-urls=https://192.168.0.31:2379 \
    --initial-cluster-token=etcd-cluster-0 \
    --initial-cluster=master1=https://192.168.0.31:2380,master2=https://192.168.0.32:2380,master3=https://192.168.0.33:2380 \
    --initial-cluster-state=new \
    --max-request-bytes=33554432 \
    --quota-backend-bytes=6442450944 \
    --heartbeat-interval=250 \
    --election-timeout=2000 \
    --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
 
[Install]
WantedBy=multi-user.target
EOF

On 192.168.0.32 (master2):
cat >  /etc/systemd/system/etcd.service <<EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos
 
[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/local/bin/etcd \
    --name=master2 \
    --cert-file=/etc/kubernetes/pki/etcd/ssl/apiserver-etcd-client.pem \
    --peer-cert-file=/etc/kubernetes/pki/etcd/ssl/apiserver-etcd-client.pem \
    --key-file=/etc/kubernetes/pki/etcd/ssl/apiserver-etcd-client-key.pem \
    --peer-key-file=/etc/kubernetes/pki/etcd/ssl/apiserver-etcd-client-key.pem \
    --trusted-ca-file=/etc/kubernetes/pki/etcd/ssl/ca.pem \
    --peer-trusted-ca-file=/etc/kubernetes/pki/etcd/ssl/ca.pem \
    --initial-advertise-peer-urls=https://192.168.0.32:2380 \
    --listen-peer-urls=https://192.168.0.32:2380 \
    --listen-client-urls=https://192.168.0.32:2379,http://127.0.0.1:2379 \
    --advertise-client-urls=https://192.168.0.32:2379 \
    --initial-cluster-token=etcd-cluster-0 \
    --initial-cluster=master1=https://192.168.0.31:2380,master2=https://192.168.0.32:2380,master3=https://192.168.0.33:2380 \
    --initial-cluster-state=new \
    --max-request-bytes=33554432 \
    --quota-backend-bytes=6442450944 \
    --heartbeat-interval=250 \
    --election-timeout=2000 \
    --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
 
[Install]
WantedBy=multi-user.target
EOF

On 192.168.0.33 (master3):
cat >  /etc/systemd/system/etcd.service <<EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos
 
[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/local/bin/etcd \
    --name=master3 \
    --cert-file=/etc/kubernetes/pki/etcd/ssl/apiserver-etcd-client.pem \
    --peer-cert-file=/etc/kubernetes/pki/etcd/ssl/apiserver-etcd-client.pem \
    --key-file=/etc/kubernetes/pki/etcd/ssl/apiserver-etcd-client-key.pem \
    --peer-key-file=/etc/kubernetes/pki/etcd/ssl/apiserver-etcd-client-key.pem \
    --trusted-ca-file=/etc/kubernetes/pki/etcd/ssl/ca.pem \
    --peer-trusted-ca-file=/etc/kubernetes/pki/etcd/ssl/ca.pem \
    --initial-advertise-peer-urls=https://192.168.0.33:2380 \
    --listen-peer-urls=https://192.168.0.33:2380 \
    --listen-client-urls=https://192.168.0.33:2379,http://127.0.0.1:2379 \
    --advertise-client-urls=https://192.168.0.33:2379 \
    --initial-cluster-token=etcd-cluster-0 \
    --initial-cluster=master1=https://192.168.0.31:2380,master2=https://192.168.0.32:2380,master3=https://192.168.0.33:2380 \
    --initial-cluster-state=new \
    --max-request-bytes=33554432 \
    --quota-backend-bytes=6442450944 \
    --heartbeat-interval=250 \
    --election-timeout=2000 \
    --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
 
[Install]
WantedBy=multi-user.target
EOF

4. Start the etcd cluster (run on all three nodes)

mkdir -p /var/lib/etcd && ll -a /var/lib/etcd
systemctl daemon-reload
systemctl enable etcd
systemctl start etcd
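etcd waits for a quorum of members, so start the service on all three nodes at roughly the same time. If a member fails to come up, inspect its logs:

journalctl -u etcd -f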

Check that each etcd endpoint is healthy:

for i in 31 32 33 ;do  ip=192.168.0.$i ;\
    echo "  $ip  " ;\
    etcdctl \
    --endpoints=https://$ip:2379 --cacert=/etc/kubernetes/pki/etcd/ssl/ca.pem \
    --cert=/etc/kubernetes/pki/etcd/ssl/apiserver-etcd-client.pem \
    --key=/etc/kubernetes/pki/etcd/ssl/apiserver-etcd-client-key.pem endpoint health 
done

etcdctl \
  -w table --cacert=/etc/kubernetes/pki/etcd/ssl/ca.pem \
  --cert=/etc/kubernetes/pki/etcd/ssl/apiserver-etcd-client.pem \
  --key=/etc/kubernetes/pki/etcd/ssl/apiserver-etcd-client-key.pem \
  --endpoints=https://192.168.0.31:2379,https://192.168.0.32:2379,https://192.168.0.33:2379 endpoint status

Sample results:

[root@master1 etcd-v3.4.13-linux-amd64]# for i in 31 32 33 ;do  ip=192.168.0.$i ;\
>     echo "  $ip  " ;\
>     etcdctl \
>     --endpoints=https://$ip:2379 --cacert=/etc/kubernetes/pki/etcd/ssl/ca.pem \
>     --cert=/etc/kubernetes/pki/etcd/ssl/apiserver-etcd-client.pem \
>     --key=/etc/kubernetes/pki/etcd/ssl/apiserver-etcd-client-key.pem endpoint health 
> done
  192.168.0.31  
https://192.168.0.31:2379 is healthy: successfully committed proposal: took = 8.355237ms
  192.168.0.32  
https://192.168.0.32:2379 is healthy: successfully committed proposal: took = 7.639998ms
  192.168.0.33  
https://192.168.0.33:2379 is healthy: successfully committed proposal: took = 7.088474ms
[root@master1 etcd-v3.4.13-linux-amd64]# etcdctl \
>   -w table --cacert=/etc/kubernetes/pki/etcd/ssl/ca.pem \
>   --cert=/etc/kubernetes/pki/etcd/ssl/apiserver-etcd-client.pem \
>   --key=/etc/kubernetes/pki/etcd/ssl/apiserver-etcd-client-key.pem \
>   --endpoints=https://192.168.0.31:2379,https://192.168.0.32:2379,https://192.168.0.33:2379 endpoint status
+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
|         ENDPOINT          |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| https://192.168.0.31:2379 | cef55d543e12e2d1 |  3.4.13 |   20 kB |      true |      false |         2 |          9 |                  9 |        |
| https://192.168.0.32:2379 | 1883465833aacd17 |  3.4.13 |   20 kB |     false |      false |         2 |          9 |                  9 |        |
| https://192.168.0.33:2379 | ce20b11bc2de1234 |  3.4.13 |   20 kB |     false |      false |         2 |          9 |                  9 |        |
+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
[root@master1 etcd-v3.4.13-linux-amd64]# 
  

III. Install and configure keepalived + haproxy on the three master servers

See the previous post and just change the IPs:
https://blog.csdn.net/oToyix/article/details/118110898
Note: the VIP here is 192.168.0.30.
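For reference, a minimal haproxy fragment consistent with this setup (a sketch only: the bind port 8443 and the backend list are inferred from the controlPlaneEndpoint used below, not taken from the referenced post; keepalived holds the 192.168.0.30 VIP):

# /usr/local/haproxy/etc/haproxy.cfg (fragment, assumed path)
frontend k8s-apiserver
    bind *:8443                      # VIP:8443 is the kubeadm controlPlaneEndpoint
    mode tcp
    option tcplog
    default_backend k8s-masters

backend k8s-masters
    mode tcp
    balance roundrobin
    server master1 192.168.0.31:6443 check
    server master2 192.168.0.32:6443 check
    server master3 192.168.0.33:6443 check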

IV. Install and configure the multi-node K8S cluster with kubeadm, connected to the external etcd

1. Install docker and docker-compose on all nodes

Run the following script:

#!/bin/bash
# must run as root
id=$(id -u)
if [ $id -ne 0 ];then
	echo "username is not root,please use root,now exit"
	exit 1
fi

cd /usr/local/
echo "download docker-19.03.0.tgz, wait......"
wget -c http://mirrors.163.com/docker-ce/linux/static/stable/x86_64/docker-20.10.7.tgz
tar -xf docker-20.10.7.tgz
useradd -s /sbin/nologin docker -M

echo "set docker path"
cat>>/etc/profile<<EOF
export PATH=\$PATH:/usr/local/docker/
EOF

mkdir /etc/docker

echo "daemon.json"
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ],
  "registry-mirrors": ["https://uyah70su.mirror.aliyuncs.com"]
}
EOF

cd docker
echo "cp -rf * /usr/bin/"
cp -rf * /usr/bin/
echo "show version"
docker --version

echo "download docker-compose"
wget -c https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64
mv docker-compose-Linux-x86_64 /usr/local/bin/docker-compose
chmod a+x /usr/local/bin/docker-compose
ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
docker-compose --version

echo "docker.sevice in systemd"
cat > /etc/systemd/system/docker.service <<EOF
[Unit]
After=network-online.target firewalld.service
Wants=network-online.target
[Service]
Type=notify
ExecStart=/usr/bin/dockerd
ExecReload=/bin/kill -s HUP $MAINPID
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
Delegate=yes
KillMode=process
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
EOF

echo "reload daemon"
systemctl daemon-reload
echo "restart docker"
systemctl restart docker
docker ps
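daemon.json sets the cgroup driver to systemd, which must match the kubelet's; after the restart, verify it:

docker info 2>/dev/null | grep -i 'cgroup driver'
# expected: Cgroup Driver: systemd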


2. Kernel modules and parameters on all nodes
cat > /etc/modules-load.d/ipvs.conf <<EOF
# Load IPVS at boot
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF
systemctl enable --now systemd-modules-load.service
#Confirm the kernel modules loaded;
lsmod | grep -e ip_vs -e nf_conntrack_ipv4
#Install ipset and ipvsadm;
yum install -y ipset ipvsadm
#Configure kernel parameters;
cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system

3. Add the Kubernetes yum repo on all nodes
cat >/etc/yum.repos.d/kubernetes.repo<<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
EOF

4. Install kubeadm, kubelet, and kubectl on all nodes
#Install the version-pinned packages;
yum install -y kubeadm-1.20.4 kubelet-1.20.4 kubectl-1.20.4
rpm -qa|grep kube
#kubeadm-1.20.4-0.x86_64
#kubectl-1.20.4-0.x86_64
#kubelet-1.20.4-0.x86_64
#kubernetes-cni-0.8.7-0.x86_64
#Enable and start the kubelet service;
systemctl enable kubelet.service
systemctl start kubelet.service
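kubelet will restart in a loop until kubeadm init/join hands it a configuration; that is expected at this stage:

systemctl status kubelet
# 'activating (auto-restart)' is normal before kubeadm init/join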

5. master1: initialize the control plane

The cluster sits behind the haproxy/keepalived VIP, so the whole control plane is initialized against that endpoint via a kubeadmin-init.yaml configuration file.
#Print the default init configuration to a yaml file;

kubeadm config print init-defaults >kubeadmin-init.yaml

Overwrite kubeadmin-init.yaml with the following content:

apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.0.31
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: master1
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "192.168.0.30:8443"
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  external:
    endpoints:
    - https://192.168.0.31:2379
    - https://192.168.0.32:2379
    - https://192.168.0.33:2379
    caFile: /etc/kubernetes/pki/etcd/ssl/ca.pem
    certFile: /etc/kubernetes/pki/etcd/ssl/apiserver-etcd-client.pem
    keyFile: /etc/kubernetes/pki/etcd/ssl/apiserver-etcd-client-key.pem
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.20.4
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.10.0.0/16
scheduler: {}
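Optionally pre-pull the control-plane images first so init does not stall on downloads (kubeadm itself suggests this in the output below):

kubeadm config images pull --config kubeadmin-init.yaml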

Then initialize the cluster:

kubeadm init --config kubeadmin-init.yaml --upload-certs
[init] Using Kubernetes version: v1.20.4
[preflight] Running pre-flight checks
        [WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.7. Latest validated version: 19.03
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local master1] and IPs [10.10.0.1 192.168.0.31 192.168.0.30]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] External etcd mode: Skipping etcd/ca certificate authority generation
[certs] External etcd mode: Skipping etcd/server certificate generation
[certs] External etcd mode: Skipping etcd/peer certificate generation
[certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation
[certs] External etcd mode: Skipping apiserver-etcd-client certificate generation
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "admin.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[kubelet-check] Initial timeout of 40s passed.
[apiclient] All control plane components are healthy after 77.509338 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.20" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
ccbd7d3239e92eb10b061df884ad11c08560a0a0328d64e69b01cdd785dea8d6
[mark-control-plane] Marking the node master1 as control-plane by adding the labels "node-role.kubernetes.io/master=''" and "node-role.kubernetes.io/control-plane='' (deprecated)"
[mark-control-plane] Marking the node master1 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: abcdef.0123456789abcdef
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join 192.168.0.30:8443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:1ea9fd60150821534bb1cf29b3be7bd690d7e57249563d3a491fada865608383 \
    --control-plane --certificate-key ccbd7d3239e92eb10b061df884ad11c08560a0a0328d64e69b01cdd785dea8d6

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.0.30:8443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:1ea9fd60150821534bb1cf29b3be7bd690d7e57249563d3a491fada865608383 

To remove the master taint so that masters can also schedule pods:

kubectl taint nodes --all node-role.kubernetes.io/master-

Since there are enough worker nodes here, the taints are left in place.
As the init output instructs, set up kubectl access:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
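A quick check that the master taint is still present (kubectl is usable from this point):

kubectl describe node master1 | grep -i taint
# expected: node-role.kubernetes.io/master:NoSchedule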
On master2 and master3, join as control-plane nodes:
  kubeadm join 192.168.0.30:8443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:1ea9fd60150821534bb1cf29b3be7bd690d7e57249563d3a491fada865608383 \
    --control-plane --certificate-key ccbd7d3239e92eb10b061df884ad11c08560a0a0328d64e69b01cdd785dea8d6

        mkdir -p $HOME/.kube
        sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
        sudo chown $(id -u):$(id -g) $HOME/.kube/config

Join the worker nodes:

kubeadm join 192.168.0.30:8443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:1ea9fd60150821534bb1cf29b3be7bd690d7e57249563d3a491fada865608383 
Check the nodes:
[root@master1 ~]# kubectl get nodes
NAME      STATUS     ROLES                  AGE     VERSION
master1   NotReady   control-plane,master   8m34s   v1.20.4
master2   NotReady   control-plane,master   5m34s   v1.20.4
master3   NotReady   control-plane,master   2m42s   v1.20.4
node1     NotReady   <none>                 32s     v1.20.4
node2     NotReady   <none>                 16s     v1.20.4
node3     NotReady   <none>                 14s     v1.20.4
[root@master1 ~]# kubectl get node -o wide    
NAME      STATUS     ROLES                  AGE     VERSION   INTERNAL-IP    EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION           CONTAINER-RUNTIME
master1   NotReady   control-plane,master   8m56s   v1.20.4   192.168.0.31   <none>        CentOS Linux 7 (Core)   3.10.0-1160.el7.x86_64   docker://20.10.7
master2   NotReady   control-plane,master   5m56s   v1.20.4   192.168.0.32   <none>        CentOS Linux 7 (Core)   3.10.0-1160.el7.x86_64   docker://20.10.7
master3   NotReady   control-plane,master   3m4s    v1.20.4   192.168.0.33   <none>        CentOS Linux 7 (Core)   3.10.0-1160.el7.x86_64   docker://20.10.7
node1     NotReady   <none>                 54s     v1.20.4   192.168.0.35   <none>        CentOS Linux 7 (Core)   3.10.0-1160.el7.x86_64   docker://20.10.7
node2     NotReady   <none>                 38s     v1.20.4   192.168.0.37   <none>        CentOS Linux 7 (Core)   3.10.0-1160.el7.x86_64   docker://20.10.7
node3     NotReady   <none>                 36s     v1.20.4   192.168.0.38   <none>        CentOS Linux 7 (Core)   3.10.0-1160.el7.x86_64   docker://20.10.7

All nodes report NotReady because no CNI network plugin is installed yet; deploying Flannel in the next section fixes this.

V. K8S node networking (Flannel)

Flannel uses the pod network 10.244.0.0/16 (matching podSubnet above); pod IPs are allocated automatically from that 10.244.x.x range. Install the Flannel network plugin as follows:

#Download the Flannel manifest;
yum install wget -y
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
#Pre-pull the images Flannel needs;
for i in $(cat kube-flannel.yml |grep image|awk -F: '{print $2":"$3}'|uniq );do docker pull $i ;done
#Apply the manifest;
[root@master1 ~]# kubectl apply -f kube-flannel.yml
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created
#Check whether the Flannel pods deployed successfully;
[root@master1 ~]# kubectl -n kube-system get pods|grep -aiE flannel
kube-flannel-ds-cq8vt             0/1     Init:0/1   0          4s
kube-flannel-ds-jq8dk             0/1     Init:0/1   0          4s
kube-flannel-ds-kzwvc             0/1     Init:0/1   0          4s
kube-flannel-ds-vpvbm             0/1     Init:0/1   0          4s
kube-flannel-ds-wqzcj             0/1     Init:0/1   0          4s
kube-flannel-ds-xqr97             0/1     Init:0/1   0          4s

The content of kube-flannel.yml:

---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
  - configMap
  - secret
  - emptyDir
  - hostPath
  allowedHostPaths:
  - pathPrefix: "/etc/cni/net.d"
  - pathPrefix: "/etc/kube-flannel"
  - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups: ['extensions']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.14.0
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.14.0
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg

Check the pods:

[root@master2 ~]# kubectl -n kube-system get pods
NAME                              READY   STATUS    RESTARTS   AGE
coredns-7f89b7bc75-2m6dw          1/1     Running   0          31m
coredns-7f89b7bc75-qqwqz          1/1     Running   0          31m
kube-apiserver-master1            1/1     Running   0          31m
kube-apiserver-master2            1/1     Running   0          28m
kube-apiserver-master3            1/1     Running   0          25m
kube-controller-manager-master1   1/1     Running   0          31m
kube-controller-manager-master2   1/1     Running   0          28m
kube-controller-manager-master3   1/1     Running   0          25m
kube-flannel-ds-cq8vt             1/1     Running   0          112s
kube-flannel-ds-jq8dk             1/1     Running   0          112s
kube-flannel-ds-kzwvc             1/1     Running   0          112s
kube-flannel-ds-vpvbm             1/1     Running   0          112s
kube-flannel-ds-wqzcj             1/1     Running   0          112s
kube-flannel-ds-xqr97             1/1     Running   0          112s
kube-proxy-8czp4                  1/1     Running   0          23m
kube-proxy-8k6fd                  1/1     Running   0          25m
kube-proxy-8r5gr                  1/1     Running   0          31m
kube-proxy-cnt5s                  1/1     Running   0          23m
kube-proxy-k6x2q                  1/1     Running   0          28m
kube-proxy-wv87v                  1/1     Running   0          23m
kube-scheduler-master1            1/1     Running   0          31m
kube-scheduler-master2            1/1     Running   0          28m
kube-scheduler-master3            1/1     Running   0          25m

VI. Enable IPVS mode for kube-proxy

Edit the kube-proxy ConfigMap: in config.conf, find the mode parameter, change it to mode: "ipvs", and save:

kubectl -n kube-system get cm kube-proxy -o yaml | sed 's/mode: ""/mode: "ipvs"/g' | kubectl replace -f -

#Or edit it manually

kubectl -n kube-system edit cm kube-proxy
kubectl -n kube-system get cm kube-proxy -o yaml | grep mode
    mode: "ipvs"
#Restart the kube-proxy pods
[root@master1 ~]# kubectl -n kube-system delete pods -l k8s-app=kube-proxy
pod "kube-proxy-cxcrg" deleted
pod "kube-proxy-gqsbh" deleted
pod "kube-proxy-wg79l" deleted
#Confirm IPVS mode is active
kubectl -n kube-system logs -f -l k8s-app=kube-proxy | grep ipvs
[root@master3 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.10.0.1:443 rr
  -> 192.168.0.31:6443            Masq    1      0          0         
  -> 192.168.0.32:6443            Masq    1      0          0         
  -> 192.168.0.33:6443            Masq    1      0          0         
TCP  10.10.0.10:53 rr
  -> 10.244.4.2:53                Masq    1      0          0         
  -> 10.244.5.2:53                Masq    1      0          0         
TCP  10.10.0.10:9153 rr
  -> 10.244.4.2:9153              Masq    1      0          0         
  -> 10.244.5.2:9153              Masq    1      0          0         
UDP  10.10.0.10:53 rr
  -> 10.244.4.2:53                Masq    1      0          0         
  -> 10.244.5.2:53                Masq    1      0          0

VII. Dashboard

Kubernetes centrally manages and schedules the container cluster, and it is usually driven from the command line, which is inconvenient for day-to-day operations; a web UI makes the cluster much easier to manage and maintain. The full process to set up the Kubernetes Dashboard follows.
1. Download the Dashboard manifest;

wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc5/aio/deploy/recommended.yaml

\cp recommended.yaml recommended.yaml.bak

2. Edit recommended.yaml around line 39.
#By default the Service type is ClusterIP; change it to NodePort so the UI is reachable from outside, and pin the nodePort. Two lines change:

 32 kind: Service
 33 apiVersion: v1
 34 metadata:
 35   labels:
 36     k8s-app: kubernetes-dashboard
 37   name: kubernetes-dashboard
 38   namespace: kubernetes-dashboard
 39 spec:
 40   type: NodePort     # changed this line
 41   ports:
 42     - port: 443
 43       targetPort: 8443
 44       nodePort: 31001     # added this line
 45   selector:
 46     k8s-app: kubernetes-dashboard
 47 
 48 ---
 49 
 50 apiVersion: v1
 51 kind: Secret
 52 metadata:	

3. Edit recommended.yaml around line 198.
#The Dashboard displays in English by default; to show it in Chinese, add an ACCEPT_LANGUAGE env var:

name: ACCEPT_LANGUAGE
value: zh

172 kind: Deployment
173 apiVersion: apps/v1
174 metadata:
175   labels:
176     k8s-app: kubernetes-dashboard
177   name: kubernetes-dashboard
178   namespace: kubernetes-dashboard
179 spec:
180   replicas: 1
181   revisionHistoryLimit: 10
182   selector:
183     matchLabels:
184       k8s-app: kubernetes-dashboard
185   template:
186     metadata:
187       labels:
188         k8s-app: kubernetes-dashboard
189     spec:
190       containers:
191         - name: kubernetes-dashboard
192           image: kubernetesui/dashboard:v2.0.0-rc5
193           imagePullPolicy: Always
194           ports:
195             - containerPort: 8443
196               protocol: TCP
197           env:
198             - name: ACCEPT_LANGUAGE				# added this line and the next
199               value: zh						# added
200           args:

4. Create the Dashboard resources:

[root@master1 ~]# kubectl apply -f recommended.yaml
namespace/kubernetes-dashboard created
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created

5. Check the Dashboard status;

kubectl get pod -n kubernetes-dashboard
kubectl get svc -n kubernetes-dashboard

6. For token-based access, create a Dashboard admin account and bind its permissions:

#Create the Dashboard admin service account;
kubectl create serviceaccount dashboard-admin -n kube-system
#Bind the account to the cluster-admin role;
kubectl create clusterrolebinding dashboard-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
#Find the token secret that was just created;
kubectl get secrets -n kube-system | grep dashboard
#Show the token details;
kubectl describe secrets -n kube-system $(kubectl get secrets -n kube-system | grep dashboard |awk '{print $1}')

 kubectl get secrets -n kube-system | grep dashboard
dashboard-admin-token-fsvhp                      kubernetes.io/service-account-token   3      10s
[root@master1 ~]# kubectl describe secrets -n kube-system $(kubectl get secrets -n kube-system | grep dashboard |awk '{print $1}')
Name:         dashboard-admin-token-fsvhp
Namespace:    kube-system
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: dashboard-admin
              kubernetes.io/service-account.uid: 9105bad8-461c-48fd-bb3e-6bc6e27819c7

Type:  kubernetes.io/service-account-token

Data
====
ca.crt:     1066 bytes
namespace:  11 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6IkdmbF9zMEdvNzhETTE1YzJBRWxLWFFqVlVObzZPdmhFTkg0TVN2MFd2azQifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJkYXNoYm9hcmQtYWRtaW4tdG9rZW4tZnN2aHAiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGFzaGJvYXJkLWFkbWluIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiOTEwNWJhZDgtNDYxYy00OGZkLWJiM2UtNmJjNmUyNzgxOWM3Iiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmUtc3lzdGVtOmRhc2hib2FyZC1hZG1pbiJ9.VL9hT7DYVeFwQ5dSeibPZ6U-JPrOey9r5v09k0Z1LHbC9Jtdrf659PX5DVkW9unnVHGdO1t9Vg8DbMuHF9EAypgesle8RPRysIT-hm9XrkWwsRwM219OBodBgAiMcLXRl8_jYMqKpSTLT9ulbnA2LFMIxFqpMKjxgCS779eiwx1zh4YgPn-rf3U79_Z9PRgso5sYB-w3eVM7zpKF5sOqxdcohyY_xKzc5jwsgPzvM3XkXw0PEMG_TonGkuZVKziyIjPDBWTVe6OOLvmiDCpSqf2eodCTZyQAwt_1gMp77-YNq8jhAs0ElajmuXsrvXYrrkbMp22ZN-k8I2m-646jhQ

7. Browse to the Dashboard on any node IP at the NodePort, for example https://192.168.0.35:31001/, and log in with the token above.

Notes:

1. On the other master nodes, kubectl initially reports:

The connection to the server localhost:8080 was refused - did you specify the right host or port?
Fix:

[root@master2 ~]# kubectl get nodes            
The connection to the server localhost:8080 was refused - did you specify the right host or port?
[root@master2 ~]# echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> /etc/profile
[root@master2 ~]# source /etc/profile
[root@master2 ~]# kubectl get nodes
NAME      STATUS     ROLES                  AGE   VERSION
master1   NotReady   control-plane,master   29m   v1.20.4
master2   NotReady   control-plane,master   26m   v1.20.4
master3   NotReady   control-plane,master   23m   v1.20.4
node1     NotReady   <none>                 21m   v1.20.4
node2     NotReady   <none>                 20m   v1.20.4
node3     NotReady   <none>                 20m   v1.20.4

2. Command to generate a join command for new nodes

kubeadm token create --print-join-command
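This prints a ready-to-run join line of the form (token and hash will differ):

kubeadm join 192.168.0.30:8443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>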

3. Command to view the Dashboard token

 kubectl describe secrets -n kube-system $(kubectl get secrets -n kube-system | grep dashboard |awk '{print $1}')

4. Recovery commands after a host reboot

systemctl stop etcd ;sleep 8 ;systemctl start etcd
ps -ef|grep haproxy|grep -v grep|awk '{print $2}'|xargs kill -9
systemctl stop keepalived
/usr/local/haproxy/sbin/haproxy   -f   /usr/local/haproxy/etc/haproxy.cfg
systemctl start keepalived

swapoff -a ;service docker restart;service kubelet restart;docker ps -aq|xargs docker restart

5. If the Dashboard returns 401, edit its Deployment (for example kubectl -n kubernetes-dashboard edit deployment kubernetes-dashboard) and extend the token TTL:

spec:
  volumes:
    - name: kubernetes-dashboard-certs
      secret:
        secretName: kubernetes-dashboard-certs
        defaultMode: 420
    - name: tmp-volume
      emptyDir: {}
  containers:
    - name: kubernetes-dashboard
      image: 'kubernetesui/dashboard:v2.0.0-rc5'
      args:
        - '--auto-generate-certificates'
        - '--namespace=kubernetes-dashboard'
        - '--token-ttl=68400'     # added this line
      ports:
        - containerPort: 8443
          protocol: TCP
      env:
        - name: ACCEPT_LANGUAGE
          value: zh

6. For NFS dynamic PVs, K8S 1.20 and later need an extra flag in the apiserver manifest:
vim /etc/kubernetes/manifests/kube-apiserver.yaml

spec:
  containers:
  - command:
    - kube-apiserver
    - --advertise-address=192.168.1.101
    - --allow-privileged=true
    - --feature-gates=RemoveSelfLink=false  # added this line
    - --authorization-mode=Node,RBAC
    - --client-ca-file=/etc/kubernetes/pki/ca.crt
    - --enable-admission-plugins=NodeRestriction

7. Customize the NodePort range
vim /etc/kubernetes/manifests/kube-apiserver.yaml

    - kube-apiserver
    - --advertise-address=192.168.2.10
    - --feature-gates=RemoveSelfLink=false
    - --service-node-port-range=1-65535   # added this line
    - --allow-privileged=true
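kube-apiserver runs as a static pod, so the kubelet restarts it automatically once the manifest changes; verify that the flag took effect:

ps -ef | grep kube-apiserver | grep -o 'service-node-port-range=[^ ]*'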

-----------------------------end
