Kubernetes Binary Installation

Lab Environment

Hostname   IP               Notes
k8s-80     192.168.188.80   2 CPU / 2 GB, master, master-01, m-01
k8s-81     192.168.188.81   2 CPU / 2 GB, master, master-02, m-02
k8s-82     192.168.188.82   2 CPU / 2 GB, master, master-03, m-03
k8s-83     192.168.188.83   1 CPU / 1 GB, node, node-01, n-01
SVIP       192.168.188.86   virtual IP

For basic environment preparation, refer to the Kubernetes kubeadm installation notes; it is omitted here.

Note that passwordless SSH and the SVIP (virtual IP) still need to be set up here.

Passwordless SSH

master-01

# Generate the key pair
[root@k8s-80 ~]# ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa

# Distribute the public key
[root@k8s-80 ~]# ssh-copy-id 192.168.188.80
[root@k8s-80 ~]# ssh-copy-id 192.168.188.81
[root@k8s-80 ~]# ssh-copy-id 192.168.188.82
[root@k8s-80 ~]# ssh-copy-id 192.168.188.83
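The four ssh-copy-id calls above can also be written as a loop; this is only a convenience sketch (it assumes the default public key path ~/.ssh/id_rsa.pub, and each host still prompts for the root password once):

# Optional: distribute the key in a loop instead of one command per host
for ip in 192.168.188.80 192.168.188.81 192.168.188.82 192.168.188.83; do
    ssh-copy-id -i ~/.ssh/id_rsa.pub root@${ip}
done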

Host Mapping

master-01

# Change the hostname (run the matching command on each node)
[root@k8s-80 ~]# hostnamectl set-hostname m-01 && bash
[root@k8s-81 ~]# hostnamectl set-hostname m-02 && bash
[root@k8s-82 ~]# hostnamectl set-hostname m-03 && bash
[root@k8s-83 ~]# hostnamectl set-hostname n-01 && bash


# Add the following entries
[root@m-01 ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.188.80 master-01 m-01
192.168.188.81 master-02 m-02
192.168.188.82 master-03 m-03
192.168.188.83 node-01 n-01
192.168.188.86 master-vip svip


# Distribute
[root@m-01 ~]# scp -r /etc/hosts 192.168.188.81:/etc/hosts
hosts                                                                                       100%  307   414.9KB/s   00:00    
[root@m-01 ~]# scp -r /etc/hosts 192.168.188.82:/etc/hosts
hosts                                                                                       100%  307   230.0KB/s   00:00    
[root@m-01 ~]# scp -r /etc/hosts 192.168.188.83:/etc/hosts
hosts                                                                                       100%  307   199.6KB/s   00:00    
[root@m-01 ~]#

Worker node operations (the worker node receives the same /etc/hosts via the scp above)

Cluster Certificates

The following commands only need to be run on master-01.

Install the certificate generation tools

https://pkg.cfssl.org/

Version 1.6.1 is used here; upload the two binaries to the server.

# Make the binaries executable
[root@m-01 ~]# chmod +x cfssl_1.6.1_linux_amd64
[root@m-01 ~]# chmod +x cfssljson_1.6.1_linux_amd64
[root@m-01 ~]# ll
total 27052
-rw-------. 1 root root     1578 May 14 14:50 anaconda-ks.cfg
-rwxr-xr-x  1 root root 16659824 May 30 21:28 cfssl_1.6.1_linux_amd64
-rwxr-xr-x  1 root root 11029744 May 30 21:28 cfssljson_1.6.1_linux_amd64
-rw-r--r--. 1 root root     2077 May 14 14:53 Initialization.sh


# Copy to /usr/local/bin so they are available as system commands
[root@m-01 ~]# cp cfssl_1.6.1_linux_amd64 /usr/local/bin/cfssl
[root@m-01 ~]# cp cfssljson_1.6.1_linux_amd64 /usr/local/bin/cfssljson


# Type the first couple of letters and press Tab to verify the commands are available
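A more explicit check than tab completion is to ask the tools themselves; cfssl has a version subcommand, and for cfssljson it is enough to confirm it resolves on the PATH:

# Optional sanity check
cfssl version             # should report version 1.6.1
command -v cfssljson      # should print /usr/local/bin/cfssljson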

Generate the Root Certificate (CA)

[root@m-01 ~]# mkdir -p /data/cert/ca

[root@m-01 ~]# cat > /data/cert/ca/ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "876000h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
           "expiry": "876000h"
      }
    }
  }
}
EOF


[root@m-01 ~]# ls /data/cert/ca/
ca-config.json

[root@m-01 ~]# cd /data/cert/ca/

Generate the root certificate signing request file (CSR)

[root@m-01 ca]# cat > /data/cert/ca/ca-csr.json << EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names":[{
    "C": "CN",
    "ST": "GuangZhou",
    "L": "GuangZhou"
  }]
}
EOF
# Verify
[root@m-01 ca]# ls
ca-config.json  ca-csr.json

Generate the root certificate

[root@m-01 ca]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
2022/05/30 21:31:27 [INFO] generating a new CA key and certificate from CSR
2022/05/30 21:31:27 [INFO] generate received request
2022/05/30 21:31:27 [INFO] received CSR
2022/05/30 21:31:27 [INFO] generating key: rsa-2048
2022/05/30 21:31:27 [INFO] encoded CSR
2022/05/30 21:31:27 [INFO] signed certificate with serial number 338018253951042801845110680725117966048987982119
# Verify
[root@m-01 ca]# ll
total 20
-rw-r--r-- 1 root root  289 May 30 21:30 ca-config.json
-rw-r--r-- 1 root root  964 May 30 21:31 ca.csr
-rw-r--r-- 1 root root  155 May 30 21:31 ca-csr.json
-rw------- 1 root root 1675 May 30 21:31 ca-key.pem
-rw-r--r-- 1 root root 1237 May 30 21:31 ca.pem
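Optionally, the new CA can be inspected with openssl to confirm the subject set in ca-csr.json and the long validity implied by the 876000h expiry:

# Optional: inspect the CA certificate
[root@m-01 ca]# openssl x509 -in ca.pem -noout -subject -dates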

Deploy the ETCD Cluster

# Node planning
# Consensus algorithm: Raft

192.168.188.80
192.168.188.81
192.168.188.82

Create the ETCD cluster certificate

[root@m-01 ca]# mkdir -p /data/cert/etcd
[root@m-01 ca]# cd /data/cert/etcd
# Note: the hosts listed here must match the entries in the hosts mapping file
cat > etcd-csr.json << EOF
{
    "CN": "etcd",
    "hosts": [
        "127.0.0.1",
        "192.168.188.80",
        "192.168.188.81",
        "192.168.188.82",
        "192.168.188.83",
        "192.168.188.86"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
          "C": "CN",
          "ST": "GuangZhou",
          "L": "GuangZhou"
        }
    ]
}
EOF
# Verify
[root@m-01 etcd]# ls
etcd-csr.json

Generate the ETCD certificates

[root@m-01 etcd]# cfssl gencert -ca=../ca/ca.pem -ca-key=../ca/ca-key.pem -config=../ca/ca-config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
[root@m-01 etcd]# ls
etcd.csr  etcd-csr.json  etcd-key.pem  etcd.pem

Distribute the ETCD certificates

[root@m-01 etcd]# for ip in m-01 m-02 m-03;do 
   ssh root@${ip} "mkdir -pv /etc/etcd/ssl"
   scp ../ca/ca*.pem  root@${ip}:/etc/etcd/ssl
   scp ./etcd*.pem  root@${ip}:/etc/etcd/ssl   
done
# Verify on all three etcd nodes: /etc/etcd/ssl should contain four files
ls /etc/etcd/ssl
ca-key.pem  ca.pem  etcd-key.pem  etcd.pem

Deploy ETCD

Download the ETCD package

Choose the version you need from https://github.com/etcd-io/etcd/releases; 3.5.4 is used here.

Upload it to the /data directory.

[root@m-01 data]# ls
cert  etcd-v3.5.4-linux-amd64.tar.gz

# Extract
[root@m-01 data]# tar xf etcd-v3.5.4-linux-amd64.tar.gz 
[root@m-01 data]# ls
cert  etcd-v3.5.4-linux-amd64  etcd-v3.5.4-linux-amd64.tar.gz


# Distribute to the other nodes
[root@m-01 data]# for i in m-01 m-02 m-03
do
    scp ./etcd-v3.5.4-linux-amd64/etcd* root@$i:/usr/local/bin/
done
# Verify on all three etcd nodes
etcd --version
etcd Version: 3.5.4
Git SHA: 08407ff76
Go Version: go1.16.15
Go OS/Arch: linux/amd64

Register the ETCD service

# Run on all three master nodes
mkdir -pv /etc/kubernetes/conf/etcd


ETCD_NAME=`hostname`
INTERNAL_IP=`hostname -i`
INITIAL_CLUSTER=m-01=https://192.168.188.80:2380,m-02=https://192.168.188.81:2380,m-03=https://192.168.188.82:2380
cat << EOF | sudo tee /usr/lib/systemd/system/etcd.service
[Unit]
Description=etcd
Documentation=https://github.com/coreos

[Service]
ExecStart=/usr/local/bin/etcd \\
  --name ${ETCD_NAME} \\
  --cert-file=/etc/etcd/ssl/etcd.pem \\
  --key-file=/etc/etcd/ssl/etcd-key.pem \\
  --peer-cert-file=/etc/etcd/ssl/etcd.pem \\
  --peer-key-file=/etc/etcd/ssl/etcd-key.pem \\
  --trusted-ca-file=/etc/etcd/ssl/ca.pem \\
  --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \\
  --peer-client-cert-auth \\
  --client-cert-auth \\
  --initial-advertise-peer-urls https://${INTERNAL_IP}:2380 \\
  --listen-peer-urls https://${INTERNAL_IP}:2380 \\
  --listen-client-urls https://${INTERNAL_IP}:2379,https://127.0.0.1:2379 \\
  --advertise-client-urls https://${INTERNAL_IP}:2379 \\
  --initial-cluster-token etcd-cluster \\
  --initial-cluster ${INITIAL_CLUSTER} \\
  --initial-cluster-state new \\
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF
systemctl enable --now etcd
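Note that etcd will not serve client requests until at least two of the three members are running, so start the service on all three masters close together; if anything looks stuck, the journal is the quickest place to look (a sketch):

# Optional: watch the etcd logs while the cluster forms
journalctl -u etcd -f
# Once all members are running, the unit should be active
systemctl status etcd --no-pager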

Test the ETCD service

# Method 1
ETCDCTL_API=3 etcdctl \
--cacert=/etc/etcd/ssl/etcd.pem \
--cert=/etc/etcd/ssl/etcd.pem \
--key=/etc/etcd/ssl/etcd-key.pem \
--endpoints="https://192.168.188.80:2379,https://192.168.188.81:2379,https://192.168.188.82:2379" \
endpoint status --write-out='table'

(Output: a table listing the status of the three etcd endpoints.)

# Method 2
ETCDCTL_API=3 etcdctl \
--cacert=/etc/etcd/ssl/etcd.pem \
--cert=/etc/etcd/ssl/etcd.pem \
--key=/etc/etcd/ssl/etcd-key.pem \
--endpoints="https://192.168.188.80:2379,https://192.168.188.81:2379,https://192.168.188.82:2379" \
member list --write-out='table'

Create the Cluster Certificates

Create the cluster CA certificate

[root@m-01 data]# mkdir /data/cert/k8s
[root@m-01 data]# cd /data/cert/k8s
[root@m-01 k8s]# cat > ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "876000h"
    },
    "profiles": {
      "kubernetes": {
         "expiry": "876000h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF

Create the cluster CSR file

[root@m-01 k8s]# cat > ca-csr.json << EOF
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "GuangZhou",
            "ST": "GuangZhou"
        }
    ]
}
EOF
[root@m-01 k8s]# ls
ca-config.json  ca-csr.json

Create the certificate

[root@m-01 k8s]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
[root@m-01 k8s]# ll
total 20
-rw-r--r-- 1 root root  296 May 30 21:48 ca-config.json
-rw-r--r-- 1 root root  964 May 30 21:52 ca.csr
-rw-r--r-- 1 root root  216 May 30 21:49 ca-csr.json
-rw------- 1 root root 1675 May 30 21:52 ca-key.pem
-rw-r--r-- 1 root root 1237 May 30 21:52 ca.pem

Create the kube-apiserver certificate

[root@m-01 k8s]# pwd
/data/cert/k8s
[root@m-01 k8s]# cat > server-csr.json << EOF   # used for headless services and the CoreDNS component configuration
{
    "CN": "kubernetes",
    "hosts": [
        "127.0.0.1",
        "192.168.188.80",
        "192.168.188.81",
        "192.168.188.82",
        "192.168.188.83",
        "192.168.188.86",
        "10.96.0.1",
        "kubernetes",
        "kubernetes.default",
        "kubernetes.default.svc",
        "kubernetes.default.svc.cluster",
        "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "GuangZhou",
            "ST": "GuangZhou"
        }
    ]
}
EOF
[root@m-01 k8s]# ls   # server-csr.json
ca-config.json  ca.csr  ca-csr.json  ca-key.pem  ca.pem  server-csr.json
[root@m-01 k8s]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
[root@m-01 k8s]# ll
total 36
-rw-r--r-- 1 root root  296 May 30 21:48 ca-config.json
-rw-r--r-- 1 root root  964 May 30 21:52 ca.csr
-rw-r--r-- 1 root root  216 May 30 21:49 ca-csr.json
-rw------- 1 root root 1675 May 30 21:52 ca-key.pem
-rw-r--r-- 1 root root 1237 May 30 21:52 ca.pem
-rw-r--r-- 1 root root 1240 May 30 21:57 server.csr
-rw-r--r-- 1 root root  585 May 30 21:56 server-csr.json
-rw------- 1 root root 1675 May 30 21:57 server-key.pem
-rw-r--r-- 1 root root 1529 May 30 21:57 server.pem
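If you want to double-check which names and IPs ended up in the apiserver certificate, openssl can print the Subject Alternative Names:

# Optional: confirm the SANs in the apiserver certificate
[root@m-01 k8s]# openssl x509 -in server.pem -noout -text | grep -A1 "Subject Alternative Name"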

Create the controller-manager certificate

[root@m-01 k8s]# pwd
/data/cert/k8s
cat > kube-controller-manager-csr.json << EOF
 {
     "CN": "system:kube-controller-manager",
     "hosts": [
         "127.0.0.1",
         "192.168.188.80",
         "192.168.188.81",
         "192.168.188.82",
         "192.168.188.83",
         "192.168.188.86"
     ],
     "key": {
         "algo": "rsa",
         "size": 2048
     },
     "names": [
         {
             "C": "CN",
             "L": "GuangZhou",
             "ST": "GuangZhou",
             "O": "system:kube-controller-manager",
             "OU": "System"
         }
     ]
 }
EOF
[root@m-01 k8s]# ls
ca-config.json  ca.csr  ca-csr.json  ca-key.pem  ca.pem  kube-controller-manager-csr.json  server.csr  server-csr.json  server-key.pem  server.pem
[root@m-01 k8s]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
[root@m-01 k8s]# ls

Create the kube-scheduler certificate

[root@m-01 k8s]# pwd
/data/cert/k8s
cat > kube-scheduler-csr.json << EOF
 {
     "CN": "system:kube-scheduler",
     "hosts": [
         "127.0.0.1",
         "192.168.188.80",
         "192.168.188.81",
         "192.168.188.82",
         "192.168.188.83",
         "192.168.188.86"
     ],
     "key": {
         "algo": "rsa",
         "size": 2048
     },
     "names": [
         {
             "C": "CN",
             "L": "GuangZhou",
             "ST": "GuangZhou",
             "O": "system:kube-scheduler",
             "OU": "System"
         }
     ]
 }
EOF
[root@m-01 k8s]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler

Create the kube-proxy certificate

[root@m-01 k8s]# pwd
/data/cert/k8s
cat > kube-proxy-csr.json << EOF
 {
     "CN":"system:kube-proxy",
     "hosts":[],
     "key":{
         "algo":"rsa",
         "size":2048
     },
     "names":[
         {
             "C":"CN",
             "L":"GuangZhou",
             "ST":"GuangZhou",
             "O":"system:kube-proxy",
             "OU":"System"
         }
     ]
 }
EOF
[root@m-01 k8s]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy   # a warning about the empty hosts field can be ignored

Create the cluster administrator certificate

[root@m-01 k8s]# pwd
/data/cert/k8s
cat > admin-csr.json << EOF
 {
     "CN":"admin",
     "key":{
         "algo":"rsa",
         "size":2048
     },
     "names":[
         {
             "C":"CN",
             "L":"GuangZhou",
             "ST":"GuangZhou",
             "O":"system:masters",
             "OU":"System"
         }
     ]
 }
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin   # a warning about the missing hosts field can be ignored

Issue the Certificates to the Nodes

[root@m-01 ~]# mkdir -pv /etc/kubernetes/ssl
mkdir: created directory "/etc/kubernetes/ssl"
[root@m-01 ~]# cd /data/cert/k8s/
[root@m-01 k8s]# cp -p ./{ca*pem,server*pem,kube-controller-manager*pem,kube-scheduler*.pem,kube-proxy*pem,admin*.pem} /etc/kubernetes/ssl
[root@m-01 k8s]# for i in m-01 m-02 m-03; do
    ssh root@$i "mkdir -pv /etc/kubernetes/ssl"
    scp /etc/kubernetes/ssl/* root@$i:/etc/kubernetes/ssl
done
# Verify
ls /etc/kubernetes/ssl

Write the Configuration Files and Download the Packages

Version 1.22.10 is used here.

Download the package

[root@m-01 k8s]# cd /data/
[root@m-01 data]# wget https://dl.k8s.io/v1.22.10/kubernetes-server-linux-amd64.tar.gz
[root@m-01 data]# ls
cert  etcd-v3.5.4-linux-amd64  etcd-v3.5.4-linux-amd64.tar.gz  kubernetes-server-linux-amd64.tar.gz
[root@m-01 data]# tar xf kubernetes-server-linux-amd64.tar.gz
[root@m-01 data]# ls
cert  etcd-v3.5.4-linux-amd64  etcd-v3.5.4-linux-amd64.tar.gz  kubernetes  kubernetes-server-linux-amd64.tar.gz

Distribute the components

[root@m-01 data]# cd kubernetes/server/bin/
[root@m-01 bin]# for i in m-01 m-02 m-03 ;do scp kube-apiserver kube-controller-manager kube-proxy kubectl kubelet kube-scheduler root@$i:/usr/local/bin; done

Create the cluster configuration files

[root@m-01 bin]# cd /data/
[root@m-01 data]# mkdir k8s-conf
[root@m-01 data]# cd k8s-conf/
Set an environment variable
[root@m-01 k8s-conf]# export KUBE_APISERVER="https://192.168.188.86:8443"
Create kube-controller-manager.kubeconfig
# Set cluster parameters
kubectl config set-cluster kubernetes \
      --certificate-authority=/etc/kubernetes/ssl/ca.pem \
      --embed-certs=true \
      --server=${KUBE_APISERVER} \
      --kubeconfig=kube-controller-manager.kubeconfig
      
      
[root@m-01 k8s-conf]# ls
kube-controller-manager.kubeconfig
# Set client authentication parameters
    kubectl config set-credentials "kube-controller-manager" \
      --client-certificate=/etc/kubernetes/ssl/kube-controller-manager.pem \
      --client-key=/etc/kubernetes/ssl/kube-controller-manager-key.pem \
      --embed-certs=true \
      --kubeconfig=kube-controller-manager.kubeconfig
# Set context parameters (the context ties the cluster and user parameters together)
    kubectl config set-context default \
      --cluster=kubernetes \
      --user="kube-controller-manager" \
      --kubeconfig=kube-controller-manager.kubeconfig
# Set the default context
[root@m-01 k8s-conf]# kubectl config use-context default --kubeconfig=kube-controller-manager.kubeconfig
Switched to context "default".
Create kube-scheduler.kubeconfig
# Set cluster parameters
    kubectl config set-cluster kubernetes \
      --certificate-authority=/etc/kubernetes/ssl/ca.pem \
      --embed-certs=true \
      --server=${KUBE_APISERVER} \
      --kubeconfig=kube-scheduler.kubeconfig
# Set client authentication parameters
    kubectl config set-credentials "kube-scheduler" \
      --client-certificate=/etc/kubernetes/ssl/kube-scheduler.pem \
      --client-key=/etc/kubernetes/ssl/kube-scheduler-key.pem \
      --embed-certs=true \
      --kubeconfig=kube-scheduler.kubeconfig
# Set context parameters (the context ties the cluster and user parameters together)
    kubectl config set-context default \
      --cluster=kubernetes \
      --user="kube-scheduler" \
      --kubeconfig=kube-scheduler.kubeconfig
# Set the default context
    kubectl config use-context default --kubeconfig=kube-scheduler.kubeconfig
Create the kube-proxy.kubeconfig cluster configuration file
# Set cluster parameters
    kubectl config set-cluster kubernetes \
      --certificate-authority=/etc/kubernetes/ssl/ca.pem \
      --embed-certs=true \
      --server=${KUBE_APISERVER} \
      --kubeconfig=kube-proxy.kubeconfig
# Set client authentication parameters
    kubectl config set-credentials "kube-proxy" \
      --client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \
      --client-key=/etc/kubernetes/ssl/kube-proxy-key.pem \
      --embed-certs=true \
      --kubeconfig=kube-proxy.kubeconfig
# Set context parameters (the context ties the cluster and user parameters together)
    kubectl config set-context default \
      --cluster=kubernetes \
      --user="kube-proxy" \
      --kubeconfig=kube-proxy.kubeconfig
# Set the default context
    kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
Create the super administrator's cluster configuration file
# Set cluster parameters
    kubectl config set-cluster kubernetes \
      --certificate-authority=/etc/kubernetes/ssl/ca.pem \
      --embed-certs=true \
      --server=${KUBE_APISERVER} \
      --kubeconfig=admin.kubeconfig
# Set client authentication parameters
    kubectl config set-credentials "admin" \
      --client-certificate=/etc/kubernetes/ssl/admin.pem \
      --client-key=/etc/kubernetes/ssl/admin-key.pem \
      --embed-certs=true \
      --kubeconfig=admin.kubeconfig
# Set context parameters (the context ties the cluster and user parameters together)
    kubectl config set-context default \
      --cluster=kubernetes \
      --user="admin" \
      --kubeconfig=admin.kubeconfig
# Set the default context
    kubectl config use-context default --kubeconfig=admin.kubeconfig
Distribute the cluster configuration files
[root@m-01 k8s-conf]# for i in m-01 m-02 m-03; do
    ssh root@$i  "mkdir -pv /etc/kubernetes/cfg"
    scp ./*.kubeconfig root@$i:/etc/kubernetes/cfg
done
# Verify on all three master nodes: there should be four configuration files
[root@m-01 k8s-conf]# ll /etc/kubernetes/cfg/
total 32
-rw------- 1 root root 6060 May 31 00:20 admin.kubeconfig
-rw------- 1 root root 6264 May 31 00:20 kube-controller-manager.kubeconfig
-rw------- 1 root root 6098 May 31 00:20 kube-proxy.kubeconfig
-rw------- 1 root root 6210 May 31 00:20 kube-scheduler.kubeconfig

Create the Cluster Token

# Only needs to be created once
# You must use the token generated on your own machine
[root@m-01 ~]# TLS_BOOTSTRAPPING_TOKEN=`head -c 16 /dev/urandom | od -An -t x | tr -d ' '`
[root@m-01 ~]# cd /data/cert/k8s/
[root@m-01 k8s]# cat > token.csv << EOF
${TLS_BOOTSTRAPPING_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF

[root@m-01 k8s]# cat token.csv   # double-check it

Distribute the cluster token for cluster TLS authentication

[root@m-01 k8s]# for i in m-01 m-02 m-03;do
scp token.csv root@$i:/etc/kubernetes/cfg/
done

Deploy the Components

Install kube-apiserver

Create the kube-apiserver configuration file
# Run on all master nodes
KUBE_APISERVER_IP=`hostname -i`
cat > /etc/kubernetes/cfg/kube-apiserver.conf << EOF
KUBE_APISERVER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--advertise-address=${KUBE_APISERVER_IP} \\
--default-not-ready-toleration-seconds=360 \\
--default-unreachable-toleration-seconds=360 \\
--max-mutating-requests-inflight=2000 \\
--max-requests-inflight=4000 \\
--default-watch-cache-size=200 \\
--delete-collection-workers=2 \\
--bind-address=0.0.0.0 \\
--secure-port=6443 \\
--allow-privileged=true \\
--service-cluster-ip-range=10.96.0.0/16 \\
--service-node-port-range=30000-52767 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--enable-bootstrap-token-auth=true \\
--token-auth-file=/etc/kubernetes/cfg/token.csv \\
--kubelet-client-certificate=/etc/kubernetes/ssl/server.pem \\
--kubelet-client-key=/etc/kubernetes/ssl/server-key.pem \\
--tls-cert-file=/etc/kubernetes/ssl/server.pem  \\
--tls-private-key-file=/etc/kubernetes/ssl/server-key.pem \\
--client-ca-file=/etc/kubernetes/ssl/ca.pem \\
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \\
--audit-log-maxage=30 \\
--audit-log-maxbackup=3 \\
--audit-log-maxsize=100 \\
--audit-log-path=/var/log/kubernetes/k8s-audit.log \\
--etcd-servers=https://192.168.188.80:2379,https://192.168.188.81:2379,https://192.168.188.82:2379 \\
--etcd-cafile=/etc/etcd/ssl/ca.pem \\
--etcd-certfile=/etc/etcd/ssl/etcd.pem \\
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem"
EOF
Register the kube-apiserver service
# Run on all master nodes
cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-apiserver.conf
ExecStart=/usr/local/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure
RestartSec=10
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kube-apiserver && systemctl start kube-apiserver

Errors encountered

Error 1: the token was not created and the file is missing (even though that step was done above); this is what the messages log shows.

Fix: just create the token.


Error 2: checking /var/log/messages (vim /var/log/messages) shows: Error: [service-account-issuer is a required flag, --service-account-signing-key-file and --service-account-issuer are required flags]

Fix

# Generate the sa key pair
cat<<EOF >/data/cert/k8s/sa-csr.json 
{
    "CN":"sa",
    "key":{
        "algo":"rsa",
        "size":2048
    },
    "names":[
        {
            "C":"CN",
            "L":"GuangZhou",
            "ST":"GuangZhou",
            "O":"kubernetes",
            "OU":"System"
        }
    ]
}
EOF


cfssl gencert -initca /data/cert/k8s/sa-csr.json  | cfssljson -bare /data/cert/k8s/sa -


openssl x509 -in /data/cert/k8s/sa.pem -pubkey -noout > /data/cert/k8s/sa.pub



# Distribute the sa key
for i in m-01 m-02 m-03;do
scp sa-key.pem sa.pub root@$i:/etc/kubernetes/ssl/
done


# Run on all three nodes
# Create the log directory:
mkdir /var/log/kubernetes/

# Modify the parameters in /etc/kubernetes/cfg/kube-apiserver.conf
# Append the following at the end; mind the closing double quote and the backslashes
--service-account-key-file=/etc/kubernetes/ssl/sa.pub \
--service-account-signing-key-file=/etc/kubernetes/ssl/sa-key.pem \
--service-account-issuer=api"


systemctl start kube-apiserver
# The correct configuration file should look like this
cat > /etc/kubernetes/cfg/kube-apiserver.conf << EOF
KUBE_APISERVER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--advertise-address=${KUBE_APISERVER_IP} \\
--default-not-ready-toleration-seconds=360 \\
--default-unreachable-toleration-seconds=360 \\
--max-mutating-requests-inflight=2000 \\
--max-requests-inflight=4000 \\
--default-watch-cache-size=200 \\
--delete-collection-workers=2 \\
--bind-address=0.0.0.0 \\
--secure-port=6443 \\
--allow-privileged=true \\
--service-cluster-ip-range=10.96.0.0/16 \\
--service-node-port-range=30000-52767 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--enable-bootstrap-token-auth=true \\
--token-auth-file=/etc/kubernetes/cfg/token.csv \\
--kubelet-client-certificate=/etc/kubernetes/ssl/server.pem \\
--kubelet-client-key=/etc/kubernetes/ssl/server-key.pem \\
--tls-cert-file=/etc/kubernetes/ssl/server.pem  \\
--tls-private-key-file=/etc/kubernetes/ssl/server-key.pem \\
--client-ca-file=/etc/kubernetes/ssl/ca.pem \\
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \\
--audit-log-maxage=30 \\
--audit-log-maxbackup=3 \\
--audit-log-maxsize=100 \\
--audit-log-path=/var/log/kubernetes/k8s-audit.log \\
--etcd-servers=https://192.168.188.80:2379,https://192.168.188.81:2379,https://192.168.188.82:2379 \\
--etcd-cafile=/etc/etcd/ssl/ca.pem \\
--etcd-certfile=/etc/etcd/ssl/etcd.pem \\
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \\
--service-account-key-file=/etc/kubernetes/ssl/sa.pub \\
--service-account-signing-key-file=/etc/kubernetes/ssl/sa-key.pem \\
--service-account-issuer=api"
EOF
# Verify the port
netstat -anltp | grep 6443
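Beyond checking the port, the apiserver's /healthz endpoint can be queried directly; this is just a sketch that reuses the admin client certificate generated earlier:

# Optional: query the health endpoint with the admin client certificate
curl --cacert /etc/kubernetes/ssl/ca.pem \
  --cert /etc/kubernetes/ssl/admin.pem \
  --key /etc/kubernetes/ssl/admin-key.pem \
  https://127.0.0.1:6443/healthz
# expected response: ok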

Make kube-apiserver Highly Available

Run on all three nodes.

Install the high-availability software

# Required on all three master nodes
# keepalived + haproxy
yum install -y keepalived haproxy

Modify the keepalived configuration file

# The configuration differs slightly per node
# Back up the original
mv /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf_bak
cd /etc/keepalived
  
KUBE_APISERVER_IP=`hostname -i`
  
cat > /etc/keepalived/keepalived.conf <<EOF
! Configuration File for keepalived
global_defs {
      router_id LVS_DEVEL
}
vrrp_script chk_kubernetes {
      script "/etc/keepalived/check_kubernetes.sh"   # haproxy liveness check script
      interval 2
      weight -5
      fall 3
      rise 2
}
vrrp_instance VI_1 {
      state MASTER   # change to BACKUP on the other two nodes
      interface ens33  # network interface name
      mcast_src_ip 192.168.188.80   # this node's IP (change on the other nodes)
      virtual_router_id 51
      priority 100   # use a lower priority (< 100) on the other nodes; values must not repeat
      advert_int 2
      authentication {
          auth_type PASS
          auth_pass K8SHA_KA_AUTH
      }
      virtual_ipaddress {
          192.168.188.86   # virtual IP
      }
      track_script {       # without this block the check script is never invoked
          chk_kubernetes
      }
}
EOF
# Create the check script referenced above
cat > /etc/keepalived/check_kubernetes.sh << 'EOF'
#!/bin/bash
PID=`ps -C haproxy --no-header | wc -l`
# If haproxy is down, try to restart it
if [ $PID -eq 0 ];then
    # /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg
    /usr/bin/systemctl start haproxy
    # Wait a moment and check again; if haproxy still is not running,
    # stop keepalived so the backup node takes over
    sleep 3
    if [ `ps -C haproxy --no-header | wc -l` -eq 0 ];then
        /usr/bin/systemctl stop keepalived
    fi
fi
EOF
chmod +x /etc/keepalived/check_kubernetes.sh
systemctl enable --now keepalived

Modify the haproxy configuration file

# High-availability software
cat > /etc/haproxy/haproxy.cfg <<EOF
global
  maxconn  2000
  ulimit-n  16384
  log  127.0.0.1 local0 err
  stats timeout 30s
  
defaults
  log global
  mode  http
  option  httplog
  timeout connect 5000
  timeout client  50000
  timeout server  50000
  timeout http-request 15s
  timeout http-keep-alive 15s
  
frontend monitor-in
  bind *:33305
  mode http
  option httplog
  monitor-uri /monitor
  
listen stats
  bind    *:8006
  mode    http
  stats   enable
  stats   hide-version
  stats   uri       /stats
  stats   refresh   30s
  stats   realm     Haproxy\ Statistics
  stats   auth      admin:admin
  
frontend k8s-master
  bind 0.0.0.0:8443
  bind 127.0.0.1:8443
  mode tcp
  option tcplog
  tcp-request inspect-delay 5s
  default_backend k8s-master
  
backend k8s-master
  mode tcp
  option tcplog
  option tcp-check
  balance roundrobin
  default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
  server kubernetes-master-01    192.168.188.80:6443  check inter 2000 fall 2 rise 2 weight 100
  server kubernetes-master-02    192.168.188.81:6443  check inter 2000 fall 2 rise 2 weight 100
  server kubernetes-master-03    192.168.188.82:6443  check inter 2000 fall 2 rise 2 weight 100
EOF
systemctl enable --now haproxy.service 

Verification

netstat -anltp |grep 8443

ip a   # check the SVIP
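A simple failover test (a sketch; run it on whichever node currently holds the VIP) is to stop keepalived there and watch the address move to a BACKUP node:

# On the node currently holding 192.168.188.86
systemctl stop keepalived        # simulate a failure of this node
ip a | grep 192.168.188.86       # the VIP should disappear here and appear on a BACKUP node
systemctl start keepalived       # restore; the VIP returns once the higher priority wins again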

Deploy TLS Bootstrapping

# The apiserver dynamically signs and issues certificates to the nodes, automating certificate signing

Create the cluster configuration file

# Only needs to run on one node
export KUBE_APISERVER="https://192.168.188.86:8443"   # make absolutely sure this is set, otherwise the cluster section of the generated kubelet-bootstrap.kubeconfig will be empty

Set cluster parameters

kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kubelet-bootstrap.kubeconfig

Set client authentication parameters

# The token here must be the one from the token.csv created above

[root@m-01 ~]# cd /data/cert/k8s/
[root@m-01 k8s]# cat token.csv 
145e3c56ad67e4e9bfeb607d1b18468a,kubelet-bootstrap,10001,"system:kubelet-bootstrap"


# Use the token from your own token.csv
kubectl config set-credentials "kubelet-bootstrap" \
  --token=145e3c56ad67e4e9bfeb607d1b18468a \
  --kubeconfig=kubelet-bootstrap.kubeconfig

Set context parameters

# Set context parameters (the context ties the cluster and user parameters together)
kubectl config set-context default \
  --cluster=kubernetes \
  --user="kubelet-bootstrap" \
  --kubeconfig=kubelet-bootstrap.kubeconfig

Set the default context

kubectl config use-context default --kubeconfig=kubelet-bootstrap.kubeconfig

Distribute the configuration

# Distribute the cluster configuration file
for i in m-01 m-02 m-03; do
    scp kubelet-bootstrap.kubeconfig root@$i:/etc/kubernetes/cfg/
done

Create the low-privilege TLS user

# Declare where the admin kubeconfig lives:
export KUBECONFIG=/etc/kubernetes/cfg/admin.kubeconfig   # run on all three nodes

# Create a low-privilege user
kubectl create clusterrolebinding kubelet-bootstrap \
  --clusterrole=system:node-bootstrapper \
  --user=kubelet-bootstrap
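If you want to confirm the binding was created, it can be listed afterwards:

# Optional: verify the binding
kubectl get clusterrolebinding kubelet-bootstrap -o wide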

Deploy kube-controller-manager

Edit the configuration file

# Run on all three master nodes
cat > /etc/kubernetes/cfg/kube-controller-manager.conf << EOF
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--leader-elect=true \\
--cluster-name=kubernetes \\
--bind-address=127.0.0.1 \\
--allocate-node-cidrs=true \\
--cluster-cidr=10.244.0.0/12 \\
--service-cluster-ip-range=10.96.0.0/16 \\
--cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem  \\
--root-ca-file=/etc/kubernetes/ssl/ca.pem \\
--service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \\
--kubeconfig=/etc/kubernetes/cfg/kube-controller-manager.kubeconfig \\
--tls-cert-file=/etc/kubernetes/ssl/kube-controller-manager.pem \\
--tls-private-key-file=/etc/kubernetes/ssl/kube-controller-manager-key.pem \\
--experimental-cluster-signing-duration=87600h0m0s \\
--controllers=*,bootstrapsigner,tokencleaner \\
--use-service-account-credentials=true \\
--node-monitor-grace-period=10s \\
--alsologtostderr=true"
#--horizontal-pod-autoscaler-use-rest-clients=true"   # removed in newer versions, including this one
EOF

Register the service

# Run on all three master nodes
cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-controller-manager.conf
ExecStart=/usr/local/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

Start

systemctl daemon-reload

systemctl enable --now kube-controller-manager.service

Verify

systemctl status kube-controller-manager

kubectl get cs   # check the cluster status

Deploy kube-scheduler

Write the configuration file

# Run on all three nodes
cat > /etc/kubernetes/cfg/kube-scheduler.conf << EOF
KUBE_SCHEDULER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--kubeconfig=/etc/kubernetes/cfg/kube-scheduler.kubeconfig \\
--leader-elect=true \\
--master=http://127.0.0.1:8080 \\
--bind-address=127.0.0.1"
EOF

Register the service

# Run on all three nodes
cat > /usr/lib/systemd/system/kube-scheduler.service << EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-scheduler.conf
ExecStart=/usr/local/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

Start

systemctl daemon-reload

systemctl enable --now kube-scheduler.service

systemctl status kube-scheduler.service

kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE                         ERROR
scheduler            Healthy   ok                              
controller-manager   Healthy   ok                              
etcd-2               Healthy   {"health":"true","reason":""}   
etcd-0               Healthy   {"health":"true","reason":""}   
etcd-1               Healthy   {"health":"true","reason":""}

Deploy the kubelet Service

Create the kubelet service configuration file

# Check which pause image version is needed, then build/pull it via Alibaba Cloud
[root@m-01 k8s]# kubelet --help |grep pause
      --pod-infra-container-image string                         Specified image will not be pruned by the image garbage collector. When container-runtime is set to 'docker', all containers in each pod will use the network/ipc namespaces from this image. Other CRI implementations have their own configuration to set this image. (default "k8s.gcr.io/pause:3.5")
# Run on all three master nodes
KUBE_HOSTNAME=`hostname`

cat > /etc/kubernetes/cfg/kubelet.conf << EOF
KUBELET_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--hostname-override=${KUBE_HOSTNAME} \\
--container-runtime=docker \\
--kubeconfig=/etc/kubernetes/cfg/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/etc/kubernetes/cfg/kubelet-bootstrap.kubeconfig \\
--config=/etc/kubernetes/cfg/kubelet-config.yml \\
--cert-dir=/etc/kubernetes/ssl \\
--image-pull-progress-deadline=15m \\
--pod-infra-container-image=registry.cn-shenzhen.aliyuncs.com/xiaosu_shy/pause:3.5"   # note: this repository is private, substitute your own
EOF

Create kubelet-config.yml

# Run on all three master nodes
KUBE_HOSTNAME=`hostname -i`
  
cat > /etc/kubernetes/cfg/kubelet-config.yml << EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: ${KUBE_HOSTNAME}
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
- 10.96.0.2
clusterDomain: cluster.local
failSwapOn: false
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/ssl/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
maxOpenFiles: 1000000
maxPods: 110
EOF

Error encountered

 E0531 16:51:47.272319   62380 server.go:294] "Failed to run kubelet" err="failed to run Kubelet: misconfiguration: kubelet cgroup driver: \"cgroupfs\" is different from docker cgroup driver: \"systemd\""
 kubelet.service: main process exited, code=exited, status=1/FAILURE

Fix

# Check on all nodes

# kubelet-config.yml above sets cgroupDriver to cgroupfs
# Use the following command to check whether docker matches

docker info


cat /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],   # 删除这一项
  "registry-mirrors": ["https://niphmo8u.mirror.aliyuncs.com"]
}


# Restart docker
systemctl daemon-reload
systemctl restart docker

Register the kubelet service

# Run on all three master nodes
cat > /usr/lib/systemd/system/kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet
After=docker.service

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kubelet.conf
ExecStart=/usr/local/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
RestartSec=10
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

Start

systemctl daemon-reload

systemctl enable --now kubelet.service 

Deploy kube-proxy

Create the configuration file

# Run on all three master nodes
cat > /etc/kubernetes/cfg/kube-proxy.conf << EOF
KUBE_PROXY_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--config=/etc/kubernetes/cfg/kube-proxy-config.yml"
EOF

Create kube-proxy-config.yml

# Run on all three master nodes
KUBE_HOSTNAME=`hostname -i`
HOSTNAME=`hostname`


cat > /etc/kubernetes/cfg/kube-proxy-config.yml << EOF
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: ${KUBE_HOSTNAME}
healthzBindAddress: ${KUBE_HOSTNAME}:10256
metricsBindAddress: ${KUBE_HOSTNAME}:10249
clientConnection:
  burst: 200
  kubeconfig: /etc/kubernetes/cfg/kube-proxy.kubeconfig
  qps: 100
hostnameOverride: ${HOSTNAME}
clusterCIDR: 10.96.0.0/16
enableProfiling: true
mode: "ipvs"
kubeProxyIPTablesConfiguration:
  masqueradeAll: false
kubeProxyIPVSConfiguration:
  scheduler: rr
  excludeCIDRs: []
EOF

Register the service

# Run on all three master nodes

cat > /usr/lib/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-proxy.conf
ExecStart=/usr/local/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure
RestartSec=10
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

Start

systemctl daemon-reload 

systemctl enable --now kube-proxy.service 

systemctl status kube-proxy
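Since mode is set to "ipvs" in kube-proxy-config.yml, the virtual servers that kube-proxy programs can be listed once services exist; this is a sketch that assumes the ipvsadm package is installed:

# Optional: inspect the IPVS rules created by kube-proxy
yum install -y ipvsadm
ipvsadm -Ln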

Join Nodes to the Cluster

View pending node join requests

# Only needs to run on one node

[root@m-01 k8s]# kubectl get csr   # see how many nodes are waiting to join

Approve the requests

# Only needs to run on one node
[root@m-01 k8s]# kubectl certificate approve `kubectl get csr | grep "Pending" | awk '{print $1}'`
[root@m-01 k8s]# kubectl get nodes
NAME   STATUS   ROLES    AGE   VERSION
m-01   Ready    <none>   36m   v1.22.10
m-02   Ready    <none>   80m   v1.22.10
m-03   Ready    <none>   80m   v1.22.10

Configure the kubernetes user information

Run on all three nodes.

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/cfg/admin.kubeconfig $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

Install the Network Plugin

The flannel network plugin is used here.

Download and install the flannel package

Upload it to /data/ on m-01.

# Only needs to run on one node

[root@m-01 data]# tar xf flannel-v0.14.0-linux-amd64.tar.gz

[root@m-01 data]# for i in m-01 m-02 m-03;do
    scp flanneld mk-docker-opts.sh root@$i:/usr/local/bin/
done

Write the flannel configuration into the cluster database (etcd)

# Only needs to run on one node
ETCDCTL_API=2 etcdctl \
--ca-file=/etc/etcd/ssl/ca.pem \
--cert-file=/etc/etcd/ssl/etcd.pem \
--key-file=/etc/etcd/ssl/etcd-key.pem \
--endpoints="https://192.168.188.80:2379,https://192.168.188.81:2379,https://192.168.188.82:2379" \
set /coreos.com/network/config '{"Network":"10.244.0.0/12", "SubnetLen": 21, "Backend": {"Type": "vxlan", "DirectRouting": true}}'
If the write fails:
the flannel log shows the following error:

Couldn't fetch network config: client: response is invalid json. The endpoint is probably not valid etcd cluster endpoint. timed out

Then check the etcd and flannel versions:

# ./etcd --version
etcd Version: 3.4.15
Git SHA: aa7126864
Go Version: go1.12.17
Go OS/Arch: linux/amd64


# ./flanneld --version
v0.11.0
Then, according to the flannel documentation, this flannel version cannot talk to the etcd v3 API, which is what caused the problem above; proceed as follows.

1. Stop flannel

Stop it however you normally would.

2. Delete the key just created in etcd

# etcdctl get --prefix /coreos.com
etcdctl del /coreos.com/network/config 
3. Enable the etcd v2 API

Add the following to the etcd startup command, then restart the etcd cluster:

--enable-v2
# Run on all three nodes
vim /usr/lib/systemd/system/etcd.service
# add under the --name line
--enable-v2 \


# Reload
systemctl daemon-reload

# Restart etcd
systemctl restart etcd

# Verify
systemctl status etcd

ETCDCTL_API=3 etcdctl \
--cacert=/etc/etcd/ssl/etcd.pem \
--cert=/etc/etcd/ssl/etcd.pem \
--key=/etc/etcd/ssl/etcd-key.pem \
--endpoints="https://192.168.188.80:2379,https://192.168.188.81:2379,https://192.168.188.82:2379" \
endpoint status --write-out='table'

Run it again

ETCDCTL_API=2 etcdctl \
--ca-file=/etc/etcd/ssl/ca.pem \
--cert-file=/etc/etcd/ssl/etcd.pem \
--key-file=/etc/etcd/ssl/etcd-key.pem \
--endpoints="https://192.168.188.80:2379,https://192.168.188.81:2379,https://192.168.188.82:2379" \
set /coreos.com/network/config '{"Network":"10.244.0.0/12", "SubnetLen": 21, "Backend": {"Type": "vxlan", "DirectRouting": true}}'
# Verify
ETCDCTL_API=2 etcdctl --ca-file=/etc/etcd/ssl/ca.pem --cert-file=/etc/etcd/ssl/etcd.pem --key-file=/etc/etcd/ssl/etcd-key.pem --endpoints="https://192.168.188.80:2379,https://192.168.188.81:2379,https://192.168.188.82:2379" get /coreos.com/network/config

{"Network":"10.244.0.0/12", "SubnetLen": 21, "Backend": {"Type": "vxlan", "DirectRouting": true}}

Register the flannel service

# Run on all three nodes

cat > /usr/lib/systemd/system/flanneld.service << EOF
[Unit]
Description=Flanneld address
After=network.target
After=network-online.target
Wants=network-online.target
After=etcd.service
Before=docker.service

[Service]
Type=notify
ExecStart=/usr/local/bin/flanneld \\
  -etcd-cafile=/etc/etcd/ssl/ca.pem \\
  -etcd-certfile=/etc/etcd/ssl/etcd.pem \\
  -etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \\
  -etcd-endpoints=https://192.168.188.80:2379,https://192.168.188.81:2379,https://192.168.188.82:2379 \\
  -etcd-prefix=/coreos.com/network \\
  -ip-masq
ExecStartPost=/usr/local/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=always
RestartSec=5
StartLimitInterval=0
[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
EOF

Modify the docker startup file

# Let flannel take over the docker network
# Run on all three nodes

sed -i '/ExecStart/s/\(.*\)/#\1/' /usr/lib/systemd/system/docker.service

sed -i '/ExecReload/a ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS -H fd:// --containerd=/run/containerd/containerd.sock' /usr/lib/systemd/system/docker.service

sed -i '/ExecReload/a EnvironmentFile=-/run/flannel/subnet.env' /usr/lib/systemd/system/docker.service
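A quick way to confirm the three sed edits landed as intended is to grep the rewritten unit file; the original ExecStart line should now be commented out and the new one should reference $DOCKER_NETWORK_OPTIONS and the flannel subnet file:

# Optional: verify the rewritten docker unit
grep -E 'EnvironmentFile|ExecStart' /usr/lib/systemd/system/docker.service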

Start

# Start flannel first, then docker
systemctl daemon-reload

systemctl enable --now flanneld.service 

systemctl restart docker
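After the restart, docker0 should sit inside the per-node subnet that flannel leased and wrote to /run/flannel/subnet.env; a quick check (a sketch):

# Optional: the docker bridge should use the subnet assigned by flannel
cat /run/flannel/subnet.env
ip -4 addr show flannel.1
ip -4 addr show docker0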

Verify the cluster network

# Cluster nodes should be able to ping each other's flannel addresses
[root@m-01 data]# kubectl get no
NAME   STATUS   ROLES    AGE    VERSION
m-01   Ready    <none>   134m   v1.22.10
m-02   Ready    <none>   179m   v1.22.10
m-03   Ready    <none>   179m   v1.22.10
[root@m-01 data]# kubectl get ns
NAME              STATUS   AGE
default           Active   12h
kube-node-lease   Active   12h
kube-public       Active   12h
kube-system       Active   12h
# Run on all three nodes

kubelet error:
"Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/docker.service\
Fix:
vim /usr/lib/systemd/system/kubelet.service
add under [Service]:
CPUAccounting=true
MemoryAccounting=true

Error:
kube-scheduler:k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver
Fix:
vim /etc/kubernetes/cfg/kube-scheduler.conf
change it to:
KUBE_SCHEDULER_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/var/log/kubernetes \
--kubeconfig=/etc/kubernetes/cfg/kube-scheduler.kubeconfig \
--leader-elect=true \
--bind-address=0.0.0.0"


systemctl daemon-reload
systemctl restart kube-scheduler kubelet
systemctl status kube-scheduler kubelet

Install the Cluster DNS

Download the DNS deployment package

# Only needs to run on one node

# wget https://github.com/coredns/deployment/archive/refs/heads/master.zip
# Here the archive was downloaded separately and uploaded to /data/

[root@m-01 data]# ls
coredns-deployment-master.zip

[root@m-01 data]# unzip coredns-deployment-master.zip

[root@m-01 data]# cd deployment-master/kubernetes/

Run the deployment command

[root@m-01 kubernetes]# ./deploy.sh -i 10.96.0.2 -s | kubectl apply -f -

Verify the cluster DNS

[root@m-01 kubernetes]# kubectl get pods -n kube-system
NAME                      READY   STATUS    RESTARTS   AGE
coredns-599f56cc4-7bsbm   1/1     Running   0          69s

Verify the Cluster

# Bind the super-admin user (only needs to run on one server)
[root@m-01 kubernetes]# kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=kubernetes
clusterrolebinding.rbac.authorization.k8s.io/cluster-system-anonymous created


# Verify that the cluster DNS and cluster network work
[root@m-01 kubernetes]# kubectl run test -it --rm --image=busybox:1.28.3
If you don't see a command prompt, try pressing enter.
/ # nslookup kubernetes
Server:    10.96.0.2
Address 1: 10.96.0.2 kube-dns.kube-system.svc.cluster.local

Name:      kubernetes
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local
/ # exit
Session ended, resume using 'kubectl attach test -c test -i -t' command when the pod is running
pod "test" deleted

Deploy the Worker Node

Which components does the node need?

kubelet、kube-proxy、flannel

Cluster planning

192.168.188.83 node-01 n-01

Cluster preparation

Set up passwordless SSH login to the node.

Distribute the packages

# Operations for the worker node (run from m-01)

# m-01
[root@m-01 kubernetes]# cd /data/

[root@m-01 data]# for i in n-01;do scp flanneld mk-docker-opts.sh kubernetes/server/bin/kubelet kubernetes/server/bin/kube-proxy root@$i:/usr/local/bin; done

Distribute the certificates

# Operations for the worker node (run from m-01)

# m-01

[root@m-01 data]# for i in n-01; do ssh root@$i "mkdir -pv /etc/kubernetes/ssl"; scp -pr /etc/kubernetes/ssl/{ca*.pem,admin*pem,kube-proxy*pem} root@$i:/etc/kubernetes/ssl; done

Distribute the configuration files

flanneld, the etcd certificates, and docker.service
Distribute the ETCD certificates
# Operations for the worker node (run from m-01)

# m-01

[root@m-01 data]# cd /etc/etcd/ssl/
[root@m-01 ssl]# for i in n-01 ;do ssh root@$i "mkdir -pv /etc/etcd/ssl"; scp ./*  root@$i:/etc/etcd/ssl; done
Distribute the flannel and docker startup unit files
[root@m-01 ssl]# for i in n-01;do scp /usr/lib/systemd/system/docker.service root@$i:/usr/lib/systemd/system/docker.service; scp /usr/lib/systemd/system/flanneld.service root@$i:/usr/lib/systemd/system/flanneld.service; done

Deploy kubelet

[root@m-01 ssl]# cd

[root@m-01 ~]# for i in n-01 ;do 
    ssh root@$i "mkdir -pv  /etc/kubernetes/cfg";
    scp /etc/kubernetes/cfg/kubelet.conf root@$i:/etc/kubernetes/cfg/kubelet.conf; 
    scp /etc/kubernetes/cfg/kubelet-config.yml root@$i:/etc/kubernetes/cfg/kubelet-config.yml; 
    scp /usr/lib/systemd/system/kubelet.service root@$i:/usr/lib/systemd/system/kubelet.service; 
    scp /etc/kubernetes/cfg/kubelet.kubeconfig root@$i:/etc/kubernetes/cfg/kubelet.kubeconfig; 
    scp /etc/kubernetes/cfg/kubelet-bootstrap.kubeconfig root@$i:/etc/kubernetes/cfg/kubelet-bootstrap.kubeconfig; 
    scp /etc/kubernetes/cfg/token.csv root@$i:/etc/kubernetes/cfg/token.csv;
done
Modify kubelet-config.yml and kubelet.conf
[root@n-01 cfg]# vim /etc/kubernetes/cfg/kubelet-config.yml
change: address: 192.168.188.80
to:     address: 192.168.188.83


[root@n-01 cfg]# vim /etc/kubernetes/cfg/kubelet.conf
change: --hostname-override=m-01
to:     --hostname-override=n-01
Start kubelet
[root@n-01 cfg]# systemctl enable --now kubelet.service

[root@n-01 cfg]# systemctl status kubelet

# First fix the cgroup driver in /etc/docker/daemon.json as described above
[root@n-01 cfg]# systemctl restart docker

Deploy kube-proxy

# Operations for the worker node (run from m-01)

# m-01

[root@m-01 ~]# for i in n-01 ; do 
    scp /etc/kubernetes/cfg/kube-proxy.conf root@$i:/etc/kubernetes/cfg/kube-proxy.conf;  
    scp /etc/kubernetes/cfg/kube-proxy-config.yml root@$i:/etc/kubernetes/cfg/kube-proxy-config.yml ;  
    scp /usr/lib/systemd/system/kube-proxy.service root@$i:/usr/lib/systemd/system/kube-proxy.service;  
    scp /etc/kubernetes/cfg/kube-proxy.kubeconfig root@$i:/etc/kubernetes/cfg/kube-proxy.kubeconfig; 
done
Modify the IP and hostname in kube-proxy-config.yml
[root@n-01 cfg]# vim /etc/kubernetes/cfg/kube-proxy-config.yml

change: bindAddress: 192.168.188.80
to:     bindAddress: 192.168.188.83

change: hostnameOverride: m-01
to:     hostnameOverride: n-01


[root@n-01 cfg]# mkdir -p /var/log/kubernetes
Start
[root@n-01 cfg]# systemctl enable --now kube-proxy.service

[root@n-01 cfg]# systemctl status kube-proxy.service

Join the cluster

Operations for the worker node (run from m-01)

m-01

# Check the cluster status

[root@m-01 ~]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE                         ERROR
scheduler            Healthy   ok                              
controller-manager   Healthy   ok                              
etcd-0               Healthy   {"health":"true","reason":""}   
etcd-2               Healthy   {"health":"true","reason":""}   
etcd-1               Healthy   {"health":"true","reason":""}  
View the join request
[root@m-01 ~]# kubectl get csr
NAME                                                   AGE     SIGNERNAME                                    REQUESTOR           REQUESTEDDURATION   CONDITION
node-csr-sNwLNbnLtEB73zGYVDQQ10Fb3qOkveuTLglnAov09m8   5m58s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   <none>              Pending
Approve it
[root@m-01 ~]# kubectl certificate approve `kubectl get csr | grep "Pending" | awk '{print $1}'`
Check the approval status
[root@m-01 ~]# kubectl get csr
NAME                                                   AGE     SIGNERNAME                                    REQUESTOR           REQUESTEDDURATION   CONDITION
node-csr-sNwLNbnLtEB73zGYVDQQ10Fb3qOkveuTLglnAov09m8   7m53s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   <none>              Approved,Issued
View the joined nodes
[root@m-01 ~]# kubectl get nodes
NAME   STATUS   ROLES    AGE   VERSION
m-01   Ready    <none>   14h   v1.22.10
m-02   Ready    <none>   14h   v1.22.10
m-03   Ready    <none>   14h   v1.22.10
n-01   Ready    <none>   14s   v1.22.10

Set Cluster Roles

Apply labels (run from m-01)

[root@m-01 ~]# kubectl label node m-01 kubernetes.io/role=master --overwrite
[root@m-01 ~]# kubectl label node m-02 kubernetes.io/role=master --overwrite
[root@m-01 ~]# kubectl label node m-03 kubernetes.io/role=master --overwrite
[root@m-01 ~]# kubectl label node n-01 kubernetes.io/role=bus --overwrite

Verify

[root@m-01 ~]# kubectl get nodes
NAME   STATUS   ROLES    AGE   VERSION
m-01   Ready    master   14h   v1.22.10
m-02   Ready    master   15h   v1.22.10
m-03   Ready    master   15h   v1.22.10
n-01   Ready    bus      52m   v1.22.10

Install the Cluster Dashboard (GUI)

https://github.com/kubernetes/dashboard

Prepare the YAML file

[root@m-01 ~]# mkdir -p /data/yaml/
[root@m-01 ~]# cd /data/yaml/
[root@m-01 yaml]# vi kubernetes-dashboard.yml

Install

[root@m-01 yaml]# kubectl apply -f kubernetes-dashboard.yml 
namespace/kubernetes-dashboard created
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created

Open a port for access

[root@m-01 yaml]# kubectl edit svc -n kubernetes-dashboard kubernetes-dashboard
# type: ClusterIP  =>  type: NodePort
change (YAML line 33):  type: ClusterIP
to:                     type: NodePort
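If you prefer a non-interactive change over kubectl edit, the same switch can be made with kubectl patch (a sketch):

# Optional: switch the service type without opening an editor
kubectl -n kubernetes-dashboard patch svc kubernetes-dashboard -p '{"spec":{"type":"NodePort"}}'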

Check the port after the change

[root@m-01 yaml]# kubectl get svc -n kubernetes-dashboard   # the NodePort range was widened earlier (30000-52767), hence the high port number
NAME                        TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)         AGE
dashboard-metrics-scraper   ClusterIP   10.96.46.138    <none>        8000/TCP        2m43s
kubernetes-dashboard        NodePort    10.96.227.248   <none>        443:43677/TCP   2m44s

Create the token configuration file

This step was already covered above and can be skipped; it was not tested here.

[root@m-01 yaml]# vim token.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system
Deploy the token to the cluster
[root@m-01 yaml]# kubectl apply -f token.yaml

Get the token

Start from this step.

[root@m-01 yaml]# kubectl -n kube-system describe $(kubectl -n kube-system get secret -n kube-system -o name | grep namespace) | grep token

Web access

# Open https://192.168.188.80:43677/ in a browser

# Grab the example Deployment YAML from https://kubernetes.io/zh/docs/concepts/workloads/controllers/deployment/, apply it, and watch the dashboard change
