Building a Highly Available Kubernetes v1.28.10 Cluster with kube-vip

1. Environment Overview

| Hostname      | OS       | Kernel                 | IP              | Virtual IP      |
| ------------- | -------- | ---------------------- | --------------- | --------------- |
| k8s-master-01 | CentOS 7 | 3.10.0-957.el7.x86_64  | 192.168.100.100 | 192.168.100.50  |
| k8s-master-02 | CentOS 7 | 3.10.0-957.el7.x86_64  | 192.168.100.110 | 192.168.100.50  |
| k8s-master-03 | CentOS 7 | 3.10.0-957.el7.x86_64  | 192.168.100.120 | 192.168.100.50  |
| k8s-node-01   | CentOS 7 | 3.10.0-957.el7.x86_64  | 192.168.100.130 | —               |
| k8s-node-02   | CentOS 7 | 3.10.0-957.el7.x86_64  | 192.168.100.140 | —               |

2. Preparation

2.1 Set the hostname

# Set the hostname on each node accordingly
hostnamectl set-hostname xxxx

2.2 Update the hosts file

cat > /etc/hosts <<EOF
192.168.100.50 lb.k8s.local
192.168.100.100 k8s-master-01
192.168.100.110 k8s-master-02
192.168.100.120 k8s-master-03
192.168.100.130 k8s-node-01
192.168.100.140 k8s-node-02
EOF
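
# A quick optional sanity check: every name added above should resolve locally on each node
# (getent reads /etc/hosts as well as DNS)
for h in lb.k8s.local k8s-master-01 k8s-master-02 k8s-master-03 k8s-node-01 k8s-node-02; do
  getent hosts $h
done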

2.3 Disable the firewall and SELinux

# Disable the firewall
systemctl disable --now firewalld.service

# Disable SELinux
sed -i 's/enforcing/disabled/' /etc/selinux/config
setenforce 0

2.4 Configure passwordless SSH access

2.4.1 Run on k8s-master-01

ssh-keygen -f ~/.ssh/id_rsa -N '' -q

ssh-copy-id k8s-master-02
ssh-copy-id k8s-master-03
ssh-copy-id k8s-node-01
ssh-copy-id k8s-node-02
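
# Optional: confirm key-based login works from k8s-master-01 before continuing;
# this should print each node's hostname without prompting for a password
for h in k8s-master-02 k8s-master-03 k8s-node-01 k8s-node-02; do
  ssh -o BatchMode=yes $h hostname
done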

2.5 Configure the yum repository

mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup

curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo

yum makecache

2.6 Disable the swap partition

swapoff -a  
sed -ri 's/.*swap.*/#&/' /etc/fstab 
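
# Verify swap is really off: the Swap row should read 0 on every node
free -h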

2.7 Synchronize time

# Adjust the time zone and calibrate the clock
date -R
timedatectl set-timezone Asia/Shanghai
yum -y install ntp

cat > /etc/ntp.conf <<EOF
driftfile  /var/lib/ntp/drift
pidfile   /var/run/ntpd.pid
logfile /var/log/ntp.log
restrict    default kod nomodify notrap nopeer noquery
restrict -6 default kod nomodify notrap nopeer noquery
restrict 127.0.0.1
server 127.127.1.0
fudge  127.127.1.0 stratum 10
server ntp.aliyun.com iburst minpoll 4 maxpoll 10
restrict ntp.aliyun.com nomodify notrap nopeer noquery
EOF
# One-off sync against the Aliyun NTP server
ntpdate ntp.aliyun.com
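
# The config above is written to /etc/ntp.conf but the service itself is never started.
# Assuming the stock CentOS 7 ntp package installed above, enable ntpd so the clock
# keeps syncing after the one-off ntpdate run
systemctl enable ntpd --now
systemctl status ntpd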

2.8 Enable kernel IP forwarding and bridge filtering

# Load br_netfilter (this does not survive a reboot)
modprobe br_netfilter

############################################################### Optional #############################################################
# Make the module load automatically at boot
# Append to the end of /etc/rc.d/rc.local (quote EOF so $file is not expanded while writing the file)
cat >> /etc/rc.d/rc.local <<'EOF'
for file in /etc/sysconfig/modules/*.modules ; do
  [ -x $file ] && $file
done
EOF
# On CentOS 7, rc.local is not executable by default; make it executable so the snippet actually runs at boot
chmod +x /etc/rc.d/rc.local

# Create the module-load script
cat > /etc/sysconfig/modules/br_netfilter.modules <<EOF
#!/bin/bash
modprobe br_netfilter
EOF

# Make it executable
chmod 755 /etc/sysconfig/modules/br_netfilter.modules
# Reboot and verify that the module is loaded automatically
############################################################### Optional #############################################################

# Verify
lsmod | grep br_netfilter

# Output like the following means the module is loaded
[root@k8s-master-01 ~]# lsmod | grep br_netfilter
br_netfilter           22256  0 
bridge                151336  1 br_netfilter


cat > /etc/sysctl.d/k8s.conf <<EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_keepalive_probes = 10
net.ipv4.tcp_keepalive_time = 600
EOF

# Apply the settings
sysctl -p  /etc/sysctl.d/k8s.conf
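
# Quick check that the keys took effect (the first three values should all be 1)
sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables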

2.9 Install IPVS

yum install -y ipset ipvsadm

cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF

# Make it executable
chmod 755 /etc/sysconfig/modules/ipvs.modules

# Run the script
bash /etc/sysconfig/modules/ipvs.modules

# Verify
lsmod | grep -e ip_vs -e nf_conntrack_ipv4
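
# Nothing uses IPVS yet at this point; once kube-proxy is running in ipvs mode
# (configured later in the kubeadm config), its virtual servers can be listed with:
ipvsadm -Ln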

3. Install containerd

3.1 Installation

sudo yum remove containerd.io
sudo yum install -y yum-utils wget net-tools
# Configure the Docker yum repository
sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

# Install containerd
yum install -y containerd.io

# Move the package's default config file out of the way
mv /etc/containerd/config.toml /tmp

# Generate a fresh default config file
containerd config default > /etc/containerd/config.toml

# Set SystemdCgroup to true
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/g' /etc/containerd/config.toml

# Change the sandbox_image to the Aliyun address
sed -i 's#sandbox_image = "registry.k8s.io/pause:3.6"#sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9"#g' /etc/containerd/config.toml

# Configure Docker Hub registry mirrors
sed -i 's#config_path = ""#config_path = "/etc/containerd/certs.d"#g' /etc/containerd/config.toml

mkdir -p /etc/containerd/certs.d/docker.io

cat > /etc/containerd/certs.d/docker.io/hosts.toml << EOF
server = "https://docker.io"
[host."https://dockerhub.icu"]
  capabilities = ["pull", "resolve"]

[host."https://docker.chenby.cn"]
  capabilities = ["pull", "resolve"]

[host."https://docker.1panel.live"]
  capabilities = ["pull", "resolve"]

[host."https://docker.aws19527.cn"]
  capabilities = ["pull", "resolve"]

[host."https://docker.anyhub.us.kg"]
  capabilities = ["pull", "resolve"]

[host."https://dhub.kubesre.xyz"]
  capabilities = ["pull", "resolve"]
EOF

# Start containerd
systemctl daemon-reload
systemctl enable containerd --now
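
# Two quick checks that containerd is healthy and the mirror config is being read.
# The nginx image is just an arbitrary public image used as a test pull, and --hosts-dir
# points the pull at the certs.d config written above (if your ctr build supports the flag).
ctr version
ctr -n k8s.io image pull --hosts-dir /etc/containerd/certs.d docker.io/library/nginx:alpine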

3.2 [Optional] Install the nerdctl tool

# Download page
# https://github.com/containerd/nerdctl/releases

# Download
wget https://github.com/containerd/nerdctl/releases/download/v1.7.6/nerdctl-1.7.6-linux-amd64.tar.gz
scp nerdctl-1.7.6-linux-amd64.tar.gz k8s-master-02:/root/
scp nerdctl-1.7.6-linux-amd64.tar.gz k8s-master-03:/root/
scp nerdctl-1.7.6-linux-amd64.tar.gz k8s-node-01:/root/
scp nerdctl-1.7.6-linux-amd64.tar.gz k8s-node-02:/root/

# Extract
mkdir -p /usr/local/nerdctl/bin && tar -zxvf  nerdctl-1.7.6-linux-amd64.tar.gz -C /usr/local/nerdctl/bin

# Create a symlink
ln -s /usr/local/nerdctl/bin/nerdctl /usr/local/bin/nerdctl


# If nerdctl warns that buildkit is missing, install buildkit
wget https://github.com/moby/buildkit/releases/download/v0.15.1/buildkit-v0.15.1.linux-amd64.tar.gz
scp buildkit-v0.15.1.linux-amd64.tar.gz k8s-master-02:/root/
scp buildkit-v0.15.1.linux-amd64.tar.gz k8s-master-03:/root/
scp buildkit-v0.15.1.linux-amd64.tar.gz k8s-node-01:/root/
scp buildkit-v0.15.1.linux-amd64.tar.gz k8s-node-02:/root/

# Extract
mkdir -p /usr/local/buildctl && tar -zxvf buildkit-v0.15.1.linux-amd64.tar.gz -C /usr/local/buildctl

# Create symlinks
ln -s /usr/local/buildctl/bin/buildkitd /usr/local/bin/buildkitd
ln -s /usr/local/buildctl/bin/buildctl /usr/local/bin/buildctl

# Manage buildkitd with systemd; create the following unit file
cat > /etc/systemd/system/buildkit.service <<EOF
[Unit]
Description=BuildKit
Documentation=https://github.com/moby/buildkit

[Service]
ExecStart=/usr/local/bin/buildkitd --oci-worker=false --containerd-worker=true

[Install]
WantedBy=multi-user.target
EOF

# Start buildkitd
systemctl daemon-reload
systemctl enable buildkit --now
systemctl status buildkit
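
# A minimal smoke test, assuming both binaries are on PATH:
# nerdctl talks to containerd directly; the k8s.io namespace is the one kubelet uses
nerdctl --namespace k8s.io images
# buildctl should list at least one containerd worker from the buildkitd just started
buildctl debug workers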

4. Install kubelet, kubeadm, and kubectl

4.1 Installation

# Configure the Kubernetes yum repository (community-owned packages hosted on openSUSE OBS)
cat <<EOF | tee /etc/yum.repos.d/kubernetes.repo
[Kubernetes]
name=Kubernetes v1.28 (Stable) (rpm)
type=rpm-md
baseurl=https://download.opensuse.org/repositories/isv:/kubernetes:/core:/stable:/v1.28/rpm/
gpgcheck=1
gpgkey=https://download.opensuse.org/repositories/isv:/kubernetes:/core:/stable:/v1.28/rpm/repodata/repomd.xml.key
enabled=1
EOF

# Refresh the repository metadata
yum makecache 

# Remove any old versions
yum remove -y kubelet kubeadm kubectl

# List the versions available for install
yum list kubelet --showduplicates | sort -r

# Install kubelet, kubeadm, and kubectl pinned to the version of Kubernetes you are deploying
yum install -y  --nogpgcheck kubelet-1.28.10 kubeadm-1.28.10 kubectl-1.28.10

# Enable kubelet at boot
systemctl enable kubelet --now


# Pull the required images
vi images.sh
#!/bin/bash
images=(
  flannel:v0.25.5
  flannel-cni-plugin:v1.5.1-flannel1
)
for imageName in ${images[@]} ; do
    ctr -n k8s.io image pull registry.cn-shenzhen.aliyuncs.com/kube-image-dongdong/$imageName
done

# Make it executable
chmod +x images.sh

# Run it
sh images.sh
# Re-tag the images to the names flannel expects
ctr -n k8s.io image tag registry.cn-shenzhen.aliyuncs.com/kube-image-dongdong/flannel-cni-plugin:v1.5.1-flannel1 docker.io/flannel/flannel-cni-plugin:v1.5.1-flannel1

ctr -n k8s.io image tag registry.cn-shenzhen.aliyuncs.com/kube-image-dongdong/flannel:v0.25.5  docker.io/flannel/flannel:v0.25.5

# Pull kube-vip separately (into ctr's default namespace, used by ctr run in section 5)
ctr image pull registry.cn-shenzhen.aliyuncs.com/kube-image-dongdong/kube-vip:v0.8.2

ctr image tag registry.cn-shenzhen.aliyuncs.com/kube-image-dongdong/kube-vip:v0.8.2 ghcr.io/kube-vip/kube-vip:v0.8.2

# ctr -n k8s.io image rm registry.cn-shenzhen.aliyuncs.com/kube-image-dongdong/flannel-cni-plugin:v1.5.1-flannel1 registry.cn-shenzhen.aliyuncs.com/kube-image-dongdong/flannel:v0.25.5 registry.cn-shenzhen.aliyuncs.com/kube-image-dongdong/kube-vip:v0.8.2
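
# Quick check that the re-tagged images are in place. Note that kubelet only sees images
# in containerd's k8s.io namespace; the kube-vip image above sits in ctr's default namespace,
# which is enough for generating the manifest in section 5, but repeat the pull/tag with
# -n k8s.io if you want the static Pod to find the image locally.
ctr -n k8s.io image ls | grep flannel
ctr image ls | grep kube-vip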

4.2 [Optional] kubectl command auto-completion

yum install -y bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc
# Reconnect your terminal session for this to take effect

4.3 [Optional] Configure crictl

# Documentation
https://v1-28.docs.kubernetes.io/zh-cn/docs/tasks/debug/debug-cluster/crictl/
# Write the config file
cat > /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///var/run/containerd/containerd.sock
image-endpoint: unix:///var/run/containerd/containerd.sock
timeout: 10
debug: false
EOF

# Verify
crictl version

[root@k8s-master-01 ~]# crictl version
Version:  0.1.0
RuntimeName:  containerd
RuntimeVersion:  1.6.33
RuntimeApiVersion:  v1

5. Install kube-vip

5.1 Run on k8s-master-01

# Generate the kube-vip static Pod manifest
mkdir -p /etc/kubernetes/manifests/
export VIP=192.168.100.50
export INTERFACE=ens32
export KVVERSION=v0.8.2
ctr run --rm --net-host ghcr.io/kube-vip/kube-vip:$KVVERSION vip /kube-vip manifest pod \
  --interface $INTERFACE \
  --address $VIP \
  --controlplane \
  --services \
  --arp \
  --enableLoadBalancer \
  --leaderElection | tee /etc/kubernetes/manifests/kube-vip.yaml
  
  
# Check that imagePullPolicy in the generated manifest is IfNotPresent; if not, change it
# Change the image pull policy
# sed -i "s#imagePullPolicy: Always#imagePullPolicy: IfNotPresent#g" /etc/kubernetes/manifests/kube-vip.yaml
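
# It is worth eyeballing the generated manifest to confirm the interface, VIP, image tag,
# and pull policy came out as intended (exact field names can vary slightly by kube-vip version)
grep -E 'image:|imagePullPolicy|vip_interface|address' /etc/kubernetes/manifests/kube-vip.yaml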

5.2 Run on k8s-master-02

# Create the directories
mkdir -p /etc/kubernetes/manifests/
mkdir -p /etc/kubernetes/pki/
mkdir -p /etc/kubernetes/pki/etcd/

5.3 Run on k8s-master-03

# Create the directories
mkdir -p /etc/kubernetes/manifests/
mkdir -p /etc/kubernetes/pki/
mkdir -p /etc/kubernetes/pki/etcd/

5.4 Run on k8s-master-01

# Copy the manifest to the other master nodes
scp -r /etc/kubernetes/manifests/kube-vip.yaml k8s-master-02:/etc/kubernetes/manifests/kube-vip.yaml
scp -r /etc/kubernetes/manifests/kube-vip.yaml k8s-master-03:/etc/kubernetes/manifests/kube-vip.yaml

6. Initialize the cluster

6.1 Run on k8s-master-01

# Generate the default kubeadm configuration file, then edit it as shown below
kubeadm config print init-defaults --component-configs KubeletConfiguration > kubeadm.yaml

apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.100.100 # change to this host's own IP
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  name: k8s-master-01 # change to this host's hostname
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "192.168.100.50:6443"  # the cluster VIP
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers # use the Aliyun image repository
kind: ClusterConfiguration
kubernetesVersion: 1.28.10 # must match the installed kubelet version
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16 # the Pod subnet (must match the flannel Network below)
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs # kube-proxy mode
---
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.crt
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 0s
    cacheUnauthorizedTTL: 0s
cgroupDriver: systemd
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
containerRuntimeEndpoint: ""
cpuManagerReconcilePeriod: 0s
evictionPressureTransitionPeriod: 0s
fileCheckFrequency: 0s
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 0s
imageMinimumGCAge: 0s
kind: KubeletConfiguration
logging:
  flushFrequency: 0
  options:
    json:
      infoBufferSize: "0"
  verbosity: 0
memorySwap: {}
nodeStatusReportFrequency: 0s
nodeStatusUpdateFrequency: 0s
rotateCertificates: true
runtimeRequestTimeout: 0s
shutdownGracePeriod: 0s
shutdownGracePeriodCriticalPods: 0s
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 0s
syncFrequency: 0s
volumeStatsAggPeriod: 0s

# Pull the required images
kubeadm config images pull --config kubeadm.yaml


[config/images] Pulled registry.aliyuncs.com/google_containers/kube-apiserver:v1.28.10
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-controller-manager:v1.28.10
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-scheduler:v1.28.10
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-proxy:v1.28.10
[config/images] Pulled registry.aliyuncs.com/google_containers/pause:3.9
[config/images] Pulled registry.aliyuncs.com/google_containers/etcd:3.5.12-0
[config/images] Pulled registry.aliyuncs.com/google_containers/coredns:v1.10.1

# Initialize the cluster
kubeadm init --config kubeadm.yaml

################################################################################################################################
## After init completes, step one: copy the kubeconfig
To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

# Or, as root, export the environment variable
Alternatively, if you are the root user, you can run:
 
  export KUBECONFIG=/etc/kubernetes/admin.conf

# Install a network plugin
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:
  # Join additional master nodes
  kubeadm join 192.168.100.50:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:bf37d819cc5942925bfb890c90dcfd1d83e5140f4f7da599c506177a36b80519 \
	--control-plane  

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.100.50:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:bf37d819cc5942925bfb890c90dcfd1d83e5140f4f7da599c506177a36b80519
################################################################################################################################
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

# Copy the certificates from k8s-master-01 to k8s-master-02 and k8s-master-03 (admin.conf is also copied to the worker nodes)
scp /etc/kubernetes/pki/ca.* root@k8s-master-02:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/ca.* root@k8s-master-03:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/sa.* root@k8s-master-02:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/sa.* root@k8s-master-03:/etc/kubernetes/pki/ 
scp /etc/kubernetes/pki/front-proxy-ca.* root@k8s-master-02:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/front-proxy-ca.* root@k8s-master-03:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/etcd/ca.* root@k8s-master-02:/etc/kubernetes/pki/etcd/
scp /etc/kubernetes/pki/etcd/ca.* root@k8s-master-03:/etc/kubernetes/pki/etcd/
scp /etc/kubernetes/admin.conf k8s-master-02:/etc/kubernetes/ 
scp /etc/kubernetes/admin.conf k8s-master-03:/etc/kubernetes/ 
scp /etc/kubernetes/admin.conf k8s-node-01:/etc/kubernetes/ 
scp /etc/kubernetes/admin.conf k8s-node-02:/etc/kubernetes/ 
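
# Before joining the remaining nodes, confirm the control plane is healthy and that kube-vip
# has bound the VIP on the leader (interface name ens32 as assumed earlier). The node stays
# NotReady until the network plugin is deployed in section 9, which is expected.
ip addr show ens32 | grep 192.168.100.50
kubectl get pods -n kube-system
kubectl get nodes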

7. Initialize the other masters

# Run on k8s-master-02 and k8s-master-03

kubeadm join 192.168.100.50:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:bf37d819cc5942925bfb890c90dcfd1d83e5140f4f7da599c506177a36b80519 \
	--control-plane
	
# Follow the printed instructions
To start administering your cluster from this node, you need to run the following as a regular user:

	mkdir -p $HOME/.kube
	sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
	sudo chown $(id -u):$(id -g) $HOME/.kube/config

Run 'kubectl get nodes' to see this node join the cluster.
################################################################################################################################

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

8. Join all worker nodes

kubeadm join 192.168.100.50:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:bf37d819cc5942925bfb890c90dcfd1d83e5140f4f7da599c506177a36b80519
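
# Back on k8s-master-01, check that all five nodes have registered (NotReady until the
# network plugin is deployed). Optionally label the workers so kubectl shows a role for them;
# the label itself is purely cosmetic.
kubectl get nodes -o wide
kubectl label node k8s-node-01 node-role.kubernetes.io/worker=
kubectl label node k8s-node-02 node-role.kubernetes.io/worker=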

9. Deploy the network plugin

9.1 Run on k8s-master-01

# Download
wget https://github.com/flannel-io/flannel/releases/download/v0.25.5/kube-flannel.yml

# Deploy using the full (modified) manifest below
kubectl apply -f kube-flannel.yml

# Full manifest
apiVersion: v1
kind: Namespace
metadata:
  labels:
    k8s-app: flannel
    pod-security.kubernetes.io/enforce: privileged
  name: kube-flannel
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: flannel
  name: flannel
  namespace: kube-flannel
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: flannel
  name: flannel
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: flannel
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-flannel
---
apiVersion: v1
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "EnableNFTables": false,
      "Backend": {
        "Type": "vxlan"
      }
    }
kind: ConfigMap
metadata:
  labels:
    app: flannel
    k8s-app: flannel
    tier: node
  name: kube-flannel-cfg
  namespace: kube-flannel
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    app: flannel
    k8s-app: flannel
    tier: node
  name: kube-flannel-ds
  namespace: kube-flannel
spec:
  selector:
    matchLabels:
      app: flannel
      k8s-app: flannel
  template:
    metadata:
      labels:
        app: flannel
        k8s-app: flannel
        tier: node
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      containers:
      - args:
        - --ip-masq
        - --kube-subnet-mgr
        - --iface=ens32 # on multi-NIC hosts, specify the internal interface by name
        command:
        - /opt/bin/flanneld
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: EVENT_QUEUE_DEPTH
          value: "5000"
        image: docker.io/flannel/flannel:v0.25.5
        name: kube-flannel
        resources:
          requests:
            cpu: 100m
            memory: 50Mi
        securityContext:
          capabilities:
            add:
            - NET_ADMIN
            - NET_RAW
          privileged: false
        volumeMounts:
        - mountPath: /run/flannel
          name: run
        - mountPath: /etc/kube-flannel/
          name: flannel-cfg
        - mountPath: /run/xtables.lock
          name: xtables-lock
      hostNetwork: true
      initContainers:
      - args:
        - -f
        - /flannel
        - /opt/cni/bin/flannel
        command:
        - cp
        image: docker.io/flannel/flannel-cni-plugin:v1.5.1-flannel1
        name: install-cni-plugin
        volumeMounts:
        - mountPath: /opt/cni/bin
          name: cni-plugin
      - args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        command:
        - cp
        image: docker.io/flannel/flannel:v0.25.5
        name: install-cni
        volumeMounts:
        - mountPath: /etc/cni/net.d
          name: cni
        - mountPath: /etc/kube-flannel/
          name: flannel-cfg
      priorityClassName: system-node-critical
      serviceAccountName: flannel
      tolerations:
      - effect: NoSchedule
        operator: Exists
      volumes:
      - hostPath:
          path: /run/flannel
        name: run
      - hostPath:
          path: /opt/cni/bin
        name: cni-plugin
      - hostPath:
          path: /etc/cni/net.d
        name: cni
      - configMap:
          name: kube-flannel-cfg
        name: flannel-cfg
      - hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
        name: xtables-lock   
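
# After applying the manifest, the flannel DaemonSet should roll out one Pod per node
# and every node should flip to Ready within a minute or two
kubectl get pods -n kube-flannel -o wide
kubectl get nodes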

10. Install the Dashboard

10.1 Run on k8s-master-01

# Download the official manifest
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml


# Deploy using the full (modified) manifest below
kubectl apply -f recommended.yaml

# Full manifest
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard

---

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30001
  selector:
    k8s-app: kubernetes-dashboard
  type: NodePort  # add type: NodePort so the Service is exposed as a NodePort

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf: ""

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque

---

kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard

---

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
    # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
    # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      containers:
        - name: kubernetes-dashboard
          image: registry.aliyuncs.com/google_containers/dashboard:v2.7.0 # image switched to the Aliyun mirror
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
              # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
    spec:
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      containers:
        - name: dashboard-metrics-scraper
          image: registry.aliyuncs.com/google_containers/metrics-scraper:v1.0.8 # image switched to the Aliyun mirror
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
          - mountPath: /tmp
            name: tmp-volume
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}

11. Access the Dashboard

11.1 Run on k8s-master-01

kubectl get svc -A

[root@k8s-master-01 ~]# kubectl get svc -A
NAMESPACE              NAME                        TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                  AGE
default                kubernetes                  ClusterIP   10.96.0.1       <none>        443/TCP                  98m
kube-system            kube-dns                    ClusterIP   10.96.0.10      <none>        53/UDP,53/TCP,9153/TCP   98m
kubernetes-dashboard   dashboard-metrics-scraper   ClusterIP   10.106.85.86    <none>        8000/TCP                 49s
kubernetes-dashboard   kubernetes-dashboard        NodePort    10.109.25.149   <none>        443:30001/TCP            49s


# Access over HTTPS at <node-ip>:30001
https://192.168.100.100:30001


# Write the manifest for a Dashboard admin user
mkdir -p /opt/config-yaml
cat > /opt/config-yaml/dashboard-user.yaml <<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: dashboard-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: dashboard-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: dashboard-user
    namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
  name: dashboard-user-secret
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/service-account.name: dashboard-user
type: kubernetes.io/service-account-token
EOF    
# Apply it
kubectl apply -f /opt/config-yaml/dashboard-user.yaml

# https://github.com/kubernetes/dashboard/blob/master/docs/user/access-control/creating-sample-user.md
# Create a short-lived token for logging in
kubectl -n kubernetes-dashboard create token dashboard-user

# Long-lived token (read from the Secret created above)
kubectl get secret dashboard-user-secret -n kubernetes-dashboard -o jsonpath={".data.token"} | base64 -d