kubeadm High-Availability Installation

Each server needs at least 2 CPU cores and 2 GB of RAM. If a machine falls short, append --ignore-preflight-errors=NumCPU to the kubeadm init command.

1. Clone the machines

172.16.1.55  m01
172.16.1.56  m02
172.16.1.57  n1
172.16.1.58  n2
172.16.1.59  vip

2. Set the hostnames

[root@k8s1 ~]# hostnamectl set-hostname m01
[root@k8s2 ~]# hostnamectl set-hostname m02
[root@k8s3 ~]# hostnamectl set-hostname n1
[root@k8s4 ~]# hostnamectl set-hostname n2

3. System tuning (run on all machines)

# Disable SELinux (temporarily now; the sed makes it permanent across reboots)
[root@m01 ~]# setenforce 0
[root@m01 ~]# sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
# Disable the firewall
systemctl disable --now firewalld

# Turn off swap (temporarily)
swapoff -a
# Disable it permanently
sed -i.bak '/swap/s/^/#/' /etc/fstab

# Also tell the kubelet to tolerate swap in case it ever comes back
echo 'KUBELET_EXTRA_ARGS="--fail-swap-on=false"' > /etc/sysconfig/kubelet

# Update the hosts file
[root@m01 ~]# cat >> /etc/hosts <<EOF 
172.16.1.55  m01
172.16.1.56  m02
172.16.1.57  n1
172.16.1.58  n2
172.16.1.59  vip
EOF

# Set up passwordless SSH
[root@m01 ~]# ssh-keygen -t rsa
[root@m01 ~]# for i in m01 m02 n1 n2;do  ssh-copy-id -i ~/.ssh/id_rsa.pub root@$i; done
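# Optional: hand the key out non-interactively with expect (installed in the
# base-packages step below). A minimal sketch, assuming all hosts share one
# root password; PASSWORD is a placeholder you must replace.
PASSWORD='your_root_password'
for i in m01 m02 n1 n2; do
  expect -c "
    set timeout 10
    spawn ssh-copy-id -i /root/.ssh/id_rsa.pub root@$i
    expect {
      \"*yes/no*\"  { send \"yes\r\"; exp_continue }
      \"*assword*\" { send \"$PASSWORD\r\" }
    }
    expect eof
  "
done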

# Configure the yum mirror
[root@m01 ~]# curl  -o /etc/yum.repos.d/CentOS-Base.repo https://repo.huaweicloud.com/repository/conf/CentOS-7-reg.repo
[root@m01 ~]# yum clean all
[root@m01 ~]# yum makecache

# Update the system (excluding the kernel for now)
[root@m01 ~]# yum update -y --exclude=kernel*

# Install common base packages
[root@m01 ~]# yum install wget expect vim net-tools ntp bash-completion ipvsadm ipset jq iptables conntrack sysstat libseccomp -y

# Sync cluster time
[root@m01 ~]# ntpdate ntp1.aliyun.com
[root@m01 ~]# hwclock --systohc
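# Keep the clocks in sync going forward (a simple cron sketch, reusing the
# same NTP server as above):
echo '*/5 * * * * /usr/sbin/ntpdate ntp1.aliyun.com >/dev/null 2>&1' >> /var/spool/cron/root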

# Upgrade the system kernel (Docker works best on 4.4+)
[root@m01 ~]# wget https://elrepo.org/linux/kernel/el7/x86_64/RPMS/kernel-lt-5.4.138-1.el7.elrepo.x86_64.rpm
[root@m01 ~]# wget https://elrepo.org/linux/kernel/el7/x86_64/RPMS/kernel-lt-devel-5.4.138-1.el7.elrepo.x86_64.rpm

    ## Install the new kernel
[root@m01 ~]# yum localinstall -y kernel-lt*
    ## Make it the default boot entry
[root@m01 ~]# grub2-set-default 0 && grub2-mkconfig -o /etc/grub2.cfg
    ## Verify the default kernel
[root@m01 ~]# grubby --default-kernel
    ## Reboot
[root@m01 ~]# reboot

# Install IPVS
yum install -y conntrack-tools ipvsadm ipset conntrack libseccomp

    ## Load the IPVS modules
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in \${ipvs_modules}; do
  /sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1
  if [ \$? -eq 0 ]; then
	/sbin/modprobe \${kernel_module}
  fi
done
EOF
[root@m01 ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs

# Tune kernel parameters
cat > /etc/sysctl.d/k8s.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.netfilter.nf_conntrack_max = 65536
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF

# Apply immediately
sysctl --system
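# Note: the net.bridge.* keys above only exist once the br_netfilter module is
# loaded (a standard kubeadm prerequisite). Load it now and on every boot,
# then re-apply the settings:
modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
sysctl --system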

4. Install Docker (all machines)

# Remove any previously installed Docker (skip this step if none was installed)
[root@m01 ~]# sudo yum remove docker docker-common docker-selinux docker-engine

# Install Docker's dependencies
[root@m01 ~]# sudo yum install -y yum-utils device-mapper-persistent-data lvm2

# Install the Docker yum repo
[root@m01 ~]# wget -O /etc/yum.repos.d/docker-ce.repo https://repo.huaweicloud.com/docker-ce/linux/centos/docker-ce.repo

# Install Docker
[root@m01 ~]# yum install docker-ce -y

# Enable and start it
[root@m01 ~]# systemctl enable --now docker.service
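# Optional but recommended: switch Docker to the systemd cgroup driver so it
# matches the kubelet's default in 1.21, and add a registry mirror. A minimal
# sketch; the mirror URL is only an example, substitute your own.
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": ["https://registry.docker-cn.com"]
}
EOF
systemctl daemon-reload && systemctl restart docker
docker info | grep -i cgroup    # should print: Cgroup Driver: systemd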

5. Install kubelet (all machines)

# Install the Kubernetes yum repo
[root@m01 ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# Install kubelet, kubeadm and kubectl
[root@m01 ~]# yum install -y kubelet-1.21.3 kubeadm-1.21.3 kubectl-1.21.3
[root@m01 ~]# systemctl enable --now kubelet
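# Sanity check: all three should report v1.21.3
[root@m01 ~]# kubeadm version -o short
[root@m01 ~]# kubectl version --client --short
[root@m01 ~]# kubelet --version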

# Install and deploy keepalived and haproxy (master nodes only)
[root@m01 ~]# yum install -y keepalived haproxy


# Write the haproxy configuration
cat > /etc/haproxy/haproxy.cfg <<EOF
global
  maxconn  2000
  ulimit-n  16384
  log  127.0.0.1 local0 err
  stats timeout 30s

defaults
  log global
  mode  http
  option  httplog
  timeout connect 5000
  timeout client  50000
  timeout server  50000
  timeout http-request 15s
  timeout http-keep-alive 15s

frontend monitor-in
  bind *:33305
  mode http
  option httplog
  monitor-uri /monitor

listen stats
  bind    *:8006
  mode    http
  stats   enable
  stats   hide-version
  stats   uri       /stats
  stats   refresh   30s
  stats   realm     Haproxy\ Statistics
  stats   auth      admin:admin

frontend k8s-master
  bind 0.0.0.0:8443
  bind 127.0.0.1:8443
  mode tcp
  option tcplog
  tcp-request inspect-delay 5s
  default_backend k8s-master

backend k8s-master
  mode tcp
  option tcplog
  option tcp-check
  balance roundrobin
  default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
 # fill in your own master hostnames and IPs
  server kubernetes-master-01    172.16.1.55:6443  check inter 2000 fall 2 rise 2 weight 100
  server kubernetes-master-02    172.16.1.56:6443  check inter 2000 fall 2 rise 2 weight 100
EOF
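# Validate the config before (re)starting haproxy:
[root@m01 ~]# haproxy -c -f /etc/haproxy/haproxy.cfg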

# Write the keepalived configuration (state, priority and mcast_src_ip differ per node)
mv /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf_bak
cd /etc/keepalived
cat > /etc/keepalived/keepalived.conf <<EOF
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
}
vrrp_script chk_kubernetes {
    script "/etc/keepalived/check_kubernetes.sh"
    interval 2
    weight -5
    fall 3
    rise 2
}
vrrp_instance VI_1 {
    state MASTER               # MASTER on m01; use BACKUP on m02
    interface eth0
    mcast_src_ip 172.16.1.55   # this node's own IP (172.16.1.56 on m02)
    virtual_router_id 52
    priority 100               # use a lower priority, e.g. 90, on m02
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        172.16.1.59
    }
#    track_script {
#       chk_kubernetes
#    }
}
EOF
[root@m01 ~]# systemctl enable --now keepalived haproxy
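# Quick checks: the VIP should be up on the MASTER node (assuming the eth0
# interface from the keepalived config), haproxy should listen on 8443, and
# the monitor endpoint should answer:
[root@m01 ~]# ip addr show eth0 | grep 172.16.1.59
[root@m01 ~]# ss -lntp | grep 8443
[root@m01 ~]# curl -s http://127.0.0.1:33305/monitor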

# Split-brain watchdog: if both masters hold the VIP, stop keepalived on the backup
[root@m01 ~]# vim /etc/keepalived/check_kubernetes.sh

#!/bin/bash
VIP="172.16.1.59"
MASTERIP="172.16.1.55"
BACKUPIP="172.16.1.56"

while true; do
    # Probe both masters for the VIP
    PROBE="ip a | grep ${VIP}"
    ssh ${MASTERIP} "${PROBE}" > /dev/null
    MASTER_STATUS=$?
    ssh ${BACKUPIP} "${PROBE}" > /dev/null
    BACKUP_STATUS=$?
    # Both hold the VIP: split brain, so drop it from the backup
    if [[ ${MASTER_STATUS} -eq 0 && ${BACKUP_STATUS} -eq 0 ]]; then
        ssh ${BACKUPIP} "systemctl stop keepalived.service"
    fi
    sleep 2
done
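# The watchdog above is a long-running loop; a minimal way to launch it,
# assuming the passwordless SSH between the masters set up earlier:
[root@m01 ~]# chmod +x /etc/keepalived/check_kubernetes.sh
[root@m01 ~]# nohup /etc/keepalived/check_kubernetes.sh >/var/log/check_kubernetes.log 2>&1 &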




# Generate the initialization config file
[root@m01 ~]# kubeadm config print init-defaults >init-config.yaml

# The default content:
[root@m01 ~]# cat init-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 1.2.3.4
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: node
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
kubernetesVersion: 1.21.0
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
scheduler: {}


# Edit the initialization config file
[root@m01 ~]# cat > init-config.yaml <<EOF
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.16.1.55
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: m01
  taints: 
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  certSANs:
    - 172.16.1.59
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: 172.16.1.59:8443
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/k8sos 
kind: ClusterConfiguration
kubernetesVersion: 1.21.3
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/12
scheduler: {}
EOF
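# Optional: pre-pull the control-plane images on every master so the init and
# join steps don't stall on downloads:
[root@m01 ~]# kubeadm config images pull --config init-config.yaml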

# Field-by-field explanation
# API object version
apiVersion: kubeadm.k8s.io/v1beta2
# bootstrap token: the credential nodes present when joining; must be unique within the cluster
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  # token TTL; defaults to 24h, after which it expires and a new one must be generated
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.16.1.55 # change this to the node's own IP
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  # change to this node's own name
  name: m01
  # taints; master nodes get the following
  taints: 
  # not schedulable
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
# extra SANs for the API server certificate: the virtual IP
  certSANs:
    - 172.16.1.59
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
# change to your VIP and haproxy port
controlPlaneEndpoint: 172.16.1.59:8443
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
# image repository; defaults to the official registry, replaced here with a domestic mirror
imageRepository: registry.cn-hangzhou.aliyuncs.com/k8sos 
kind: ClusterConfiguration
# kubernetes version
kubernetesVersion: 1.21.3
networking:
  dnsDomain: cluster.local
  # the pod network (must match the CNI plugin, e.g. flannel's 10.244.0.0/16)
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/12
scheduler: {}

# Initialize the cluster
[root@m01 ~]# kubeadm init --config init-config.yaml --upload-certs
# Join the other masters and the workers with the join commands that init prints
# (the master and worker commands differ; don't mix them up)
# control-plane nodes:
kubeadm join 172.16.1.59:8443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:7c8281c6049acce06a4e5691e4c4fcbb1a5d41f9365f328ee83d74ce3dc9476e \
	--control-plane --certificate-key b5b0f3a56a5f7a9a7ba315495d60f20c1c26eb72b2259f3fd8aee3df298c64d6

# worker nodes:
kubeadm join 172.16.1.59:8443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:7c8281c6049acce06a4e5691e4c4fcbb1a5d41f9365f328ee83d74ce3dc9476e

**Post-initialization steps (run on the master only)**
```bash
# Set up the user's cluster credentials
[root@k8s-m-01 ~]# mkdir -p $HOME/.kube
[root@k8s-m-01 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-m-01 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config

# As root you can instead run: export KUBECONFIG=/etc/kubernetes/admin.conf

# Option 1: install the flannel network plugin (see flannel.yaml in the attachment below)
[root@k8s-m-01 ~]# wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# If quay.io is unreachable, replace quay.io/coreos/flannel with quay-mirror.qiniu.com/coreos/flannel
[root@k8s-m-01 ~]# kubectl apply -f kube-flannel.yml

# Option 2: install the calico network plugin
# On the calico site: Docs -> Install Calico -> Kubernetes -> Self-managed on-premises ->
# Install Calico for on-premises deployments; pick the version that fits your cluster
[root@k8s-m-01 ~]# curl https://docs.projectcalico.org/manifests/calico.yaml -O
[root@k8s-m-01 ~]# kubectl apply -f calico.yaml
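# Watch until the CNI and coredns pods reach Running before joining more
# nodes (Ctrl-C to stop watching):
[root@k8s-m-01 ~]# kubectl get pods -n kube-system -w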

```

If the machines are under-spec'd, append --ignore-preflight-errors=NumCPU to the kubeadm commands above.

# Once the network is up, join the remaining nodes:
[root@k8s-master-02 ~]# kubeadm join 172.16.1.59:8443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:03e86d6d321aab1e9ad76d57a090eb32f9a0a2c5024d9329d87278db3a997687 \
    --control-plane --certificate-key fd461ab93ac5c57df1f14d51bfa47c4d64ac0dc6a8bbf069b2cc5c61063297c5

[root@k8s-node-01 ~]# kubeadm join 172.16.1.59:8443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:03e86d6d321aab1e9ad76d57a090eb32f9a0a2c5024d9329d87278db3a997687

[root@k8s-node-02 ~]# kubeadm join 172.16.1.59:8443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:03e86d6d321aab1e9ad76d57a090eb32f9a0a2c5024d9329d87278db3a997687

[root@k8s-m-01 ~]# kubectl get nodes
NAME       STATUS   ROLES                  AGE   VERSION
k8s-m-01   Ready    control-plane,master   13m   v1.21.3
k8s-n-01   Ready    <none>                 35s   v1.21.3
k8s-n-02   Ready    <none>                 39s   v1.21.3

# Enable kubectl tab completion
yum install -y bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc
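# Optional: a short alias that keeps completion working (the __start_kubectl
# function is defined by the completion script loaded above):
echo 'alias k=kubectl' >> ~/.bashrc
echo 'complete -o default -F __start_kubectl k' >> ~/.bashrc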

Check the cluster status

Method 1: node status

[root@k8s-m-01 ~]# kubectl get nodes
NAME       STATUS   ROLES                  AGE     VERSION
k8s-m-01   Ready    control-plane,master   5m56s   v1.21.3

Method 2: system pods

[root@k8s-m-01 ~]# kubectl get pods -n kube-system
NAME                               READY   STATUS    RESTARTS   AGE
coredns-f68b4c98f-5t7wm            1/1     Running   0          5m54s
coredns-f68b4c98f-5xqjs            1/1     Running   0          5m54s
etcd-k8s-m-01                      1/1     Running   0          6m3s
kube-apiserver-k8s-m-01            1/1     Running   0          6m3s
kube-controller-manager-k8s-m-01   1/1     Running   0          6m3s
kube-flannel-ds-7bcwl              1/1     Running   0          104s
kube-proxy-ntpjx                   1/1     Running   0          5m54s
kube-scheduler-k8s-m-01            1/1     Running   0          6m3s

Method 3: verify cluster DNS directly

[root@k8s-m-01 ~]# kubectl run test -it --rm --image=busybox:1.28.3
If you don't see a command prompt, try pressing enter.
/ # nslookup kubernetes
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

Name:      kubernetes
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local

### Errors and fixes
Problem 1:
**The connection to the server localhost:8080 was refused - did you specify the right host or port?**
**Analysis:**
An environment-variable problem: kubectl on this machine was never pointed at the cluster's admin credentials after initialization. Setting the KUBECONFIG variable locally resolves it.

**Fix:**

Step 1: set the environment variable
The steps below are for Linux; adapt to your situation.
Option 1: edit the profile
[root@m01 ~]# vim /etc/profile
Append at the bottom: export KUBECONFIG=/etc/kubernetes/admin.conf

Option 2: append directly
[root@m01 ~]# echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> /etc/profile

Step 2: reload the profile
[root@m01 ~]# source /etc/profile


Problem 2:
After the master is deployed, kubectl get cs reports the scheduler and controller-manager as unhealthy:

(screenshot lost; the output showed scheduler and controller-manager as Unhealthy with "connection refused" on their healthz ports)

**Analysis:**
This happens because **kube-controller-manager.yaml and kube-scheduler.yaml under /etc/kubernetes/manifests/ set the insecure port to 0; the fix is to comment out the corresponding --port flag**, as follows:

Step 1:
In kube-controller-manager.yaml, comment out --port=0

Step 2:
In kube-scheduler.yaml, comment out --port=0

Step 3: restart the kubelet on the master (systemctl restart kubelet.service) and re-check; the components now report Healthy
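A minimal sketch of the three steps with sed (keeps .bak backups; the kubelet re-creates the static pods automatically after the manifests change):

sed -i.bak 's/^\([[:space:]]*\)- --port=0/\1#- --port=0/' /etc/kubernetes/manifests/kube-controller-manager.yaml
sed -i.bak 's/^\([[:space:]]*\)- --port=0/\1#- --port=0/' /etc/kubernetes/manifests/kube-scheduler.yaml
systemctl restart kubelet.service
kubectl get cs    # should now report Healthy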




Attachment: flannel.yaml (used in the network-plugin step above)

```yaml
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
  - configMap
  - secret
  - emptyDir
  - hostPath
  allowedHostPaths:
  - pathPrefix: "/etc/cni/net.d"
  - pathPrefix: "/etc/kube-flannel"
  - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups: ['extensions']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: registry.cn-hangzhou.aliyuncs.com/alvinos/flanned:v0.13.1-rc1
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: registry.cn-hangzhou.aliyuncs.com/alvinos/flanned:v0.13.1-rc1
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg

```

kubeadm: adding a new master or node

Step 1: generate a new join token on an existing master
[root@cn-hongkong nfs]# kubeadm token create --print-join-command
kubeadm join 172.31.182.156:8443 --token ortvag.ra0654faci8y8903 --discovery-token-ca-cert-hash sha256:04755ff1aa88e7db283c85589bee31fabb7d32186612778e53a536a297fc9010

Step 2: on a master, upload the certificates a new control-plane node needs
[root@cn-hongkong k8s_yaml]# kubeadm init phase upload-certs --upload-certs
[upload-certs] Storing the certificates in ConfigMap "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
f8d1c027c01baef6985ddf24266641b7c64f9fd922b15a32fce40b6b4b21e47d

Step 3: join a new node
kubeadm join 172.31.182.156:8443 --token ortvag.ra0654faci8y8903 --discovery-token-ca-cert-hash sha256:04755ff1aa88e7db283c85589bee31fabb7d32186612778e53a536a297fc9010

Step 4: join a new master, appending the certificate key from step 2 after --control-plane --certificate-key
kubeadm join 172.31.182.156:8443 --token ortvag.ra0654faci8y8903 \
    --discovery-token-ca-cert-hash sha256:04755ff1aa88e7db283c85589bee31fabb7d32186612778e53a536a297fc9010 \
    --control-plane --certificate-key f8d1c027c01baef6985ddf24266641b7c64f9fd922b15a32fce40b6b4b21e47d

Install the cluster web UI (Dashboard)

# The dashboard manifest is maintained at https://github.com/kubernetes/dashboard;
# download it from there, or write it by hand as below

# Edit recommended.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard

---

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf: ""

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque

---

kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard

---

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
    # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
    # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
        - name: kubernetes-dashboard
          image: kubernetesui/dashboard:v2.0.4
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
              # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
    spec:
      containers:
        - name: dashboard-metrics-scraper
          image: kubernetesui/metrics-scraper:v1.0.4
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
          - mountPath: /tmp
            name: tmp-volume
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}

Deploy it

[root@k8s-m-01 ~]# kubectl apply -f recommended.yaml
namespace/kubernetes-dashboard created
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created

Open a port for access

[root@k8s-m-01 ~]# kubectl edit svc -n kubernetes-dashboard kubernetes-dashboard
type: ClusterIP => type: NodePort
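An equivalent non-interactive change (a sketch using kubectl patch):

[root@k8s-m-01 ~]# kubectl -n kubernetes-dashboard patch svc kubernetes-dashboard -p '{"spec":{"type":"NodePort"}}'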

Check the new port

[root@k8s-m-01 ~]# kubectl get svc -n kubernetes-dashboard
NAME                        TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)         AGE
dashboard-metrics-scraper   ClusterIP   10.96.86.206    <none>        8000/TCP        3m58s
kubernetes-dashboard        NodePort    10.96.113.185   <none>        443:46491/TCP   3m59s

Create the token (admin user) manifest

[root@k8s-m-01 ~]# vim token.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system

Apply it to the cluster

[root@k8s-m-01 ~]# kubectl apply -f token.yaml
serviceaccount/admin-user created
clusterrolebinding.rbac.authorization.k8s.io/admin-user created

Get the token

[root@k8s-m-01 ~]# kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}') | grep token: | awk '{print $2}'
eyJhbGciOiJSUzI1NiIsImtpZCI6Ikx1bS1XSGE3Rk9wMnFNdVNqdHJGVi1ERFAzVjFyQXFXeFBWQ0RGS2N0bUUifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLWtuYnptIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI1YzVkMzdmMS03MTFkLTQ0YjYtOWIzOS0zZjEzMDFkNDRjMmUiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.oJqolcMm_W81p3JnprpHSRaIFCjL533ihC5_YMWRRE9WZmpK6_-EpBk6GmnuGJ4EsRT89AxJIqDN3edxtLnirRoqTUTfqDbU-ik5fyDZ_rRjuLD8fFtkZ6-WXHGo76Kj-Sw8CnpLzkaced9KhpRLHtMFawQxkeMf2SKVgxr1uWWmzVTXaylhh_frIvSbkxt5A_YflEhGyHPh6EbPg1T9WoDsLa7oTXGGAXzu97_j2AU3u6TBPkwKn4S-cFOceh_KqSKNsnmwGE9FohaKQP1X_WpDgfjXohR7xbGScW_VUj1XuI_75Exip8yflgZ70rF93xnfS69V_1wvPs2sfOO5-g

# Paste the token into the login page; if the dashboard overview appears, the install succeeded

Install Kuboard

Introduction:
Kuboard is a free graphical management tool for Kubernetes that aims to help users quickly get microservices running on Kubernetes.

Install it

[root@k8s-master-01 ~]# kubectl apply -f https://kuboard.cn/install-script/kuboard.yaml
[root@k8s-master-01 ~]# kubectl apply -f https://addons.kuboard.cn/metrics-server/0.3.6/metrics-server.yaml

Check the status

[root@k8s-master-01 ~]# kubectl get pod -n kube-system

Once the kuboard pod is Running, open port 32567 on any cluster node (http://ip:32567) to reach the Kuboard UI; with a node IP of 192.168.13.13, for example: http://192.168.13.13:32567

Get the token

[root@k8s-master-01 ~]# echo $(kubectl -n kube-system get secret $(kubectl -n kube-system get secret | grep kuboard-user | awk '{print $1}') -o go-template='{{.data.token}}' | base64 -d)

It prints something like:

eyJhbGciOiJSUzI1NiIsImtpZCI6IldZb29XX0ZjY09wWUhGbHNGak1mRXdidzZvSlMzUENDZ3JVSlJ6dmZJc0UifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJvYXJkLXVzZXItdG9rZW4tbjRtc2wiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoia3Vib2FyZC11c2VyIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiMjk3YmQwNTMtZDQyZC00NmFlLWFhNzQtZGUyOWI5ODY0NjRjIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmUtc3lzdGVtOmt1Ym9hcmQtdXNlciJ9.ZSdGQoPkNa-DB0OoOziHxcKxAdbX1J3dwUv8cMN0gNpo_r2puIVY0w20BC0GIdMAlWG3PMBQMnNoObZpT3E0G_X03YEiCBHHryGyDTtYowfaKngbE9h6hXG3_dnZVGyOctRrBPclbDImmSzm0c1kAGpxzOBQTS0vtFWNgsOoTW9lYXtJ5aIb1b1JX9S7PPXOBrl9xX1P4sJN74qSY7xSEo12SbSDGMztIfe-qI0qoei5ykhNcmQt1Dunnjl08F11GjhWLaH9z1GsNxLM05zbXEhiwUvddjaksbip7grc8UFNXvEYnN3vRvoXpMC9MRiMA2rn5RbvnxHhHlDKfAKhxA

Paste it into the login page to finish the installation.
