1. Deploying a K8S Cluster from Binaries with a Single Master Node

1.1 Environment

| Hostname | IP address | Components to deploy |
| -------- | ---------- | -------------------- |
| master   | 20.0.0.101 | kube-apiserver, kube-controller-manager, kube-scheduler, etcd |
| node01   | 20.0.0.102 | kubelet, kube-proxy, docker, flannel, etcd |
| node02   | 20.0.0.103 | kubelet, kube-proxy, docker, flannel, etcd |

1.2 Self-Signed SSL Certificates Used by the K8S Cluster

| Component      | Certificates used |
| -------------- | ----------------- |
| etcd           | ca.pem, server.pem, server-key.pem |
| flannel        | ca.pem, server.pem, server-key.pem |
| kube-apiserver | ca.pem, server.pem, server-key.pem |
| kubelet        | ca.pem, ca-key.pem |
| kube-proxy     | ca.pem, kube-proxy.pem, kube-proxy-key.pem |
| kubectl        | ca.pem, admin.pem, admin-key.pem |

1.3 Walkthrough

1.3.1 Deploying the etcd Cluster

## On master ##
1. Set the hostname
[root@localhost ~]# hostnamectl set-hostname master
[root@localhost ~]# su

2. Disable the firewall and SELinux
[root@master ~]# systemctl stop firewalld
[root@master ~]# setenforce 0
setenforce: SELinux is disabled

3. Create the k8s directory
[root@master ~]# mkdir k8s
[root@master ~]# cd k8s/
[root@master k8s]# rz -E		## upload the scripts
rz waiting to receive.
[root@master k8s]# ls
etcd-cert.sh  etcd.sh		

4. Create the etcd-cert directory
[root@master k8s]# mkdir etcd-cert
[root@master k8s]# mv etcd-cert.sh etcd-cert	## move the script into the etcd-cert directory

5. Download the official cfssl tools
[root@master k8s]# vim cfssl.sh	
curl -L https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -o /usr/local/bin/cfssl
curl -L https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -o /usr/local/bin/cfssljson
curl -L https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -o /usr/local/bin/cfssl-certinfo
chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson /usr/local/bin/cfssl-certinfo
[root@master k8s]# bash cfssl.sh 
[root@master k8s]# ls /usr/local/bin/
cfssl  cfssl-certinfo  cfssljson

6. Define the CA configuration
[root@master k8s]# cd etcd-cert/
[root@master etcd-cert]# ls
etcd-cert.sh
[root@master etcd-cert]# cat > ca-config.json <<EOF		## CA signing configuration
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "www": {
        "expiry": "87600h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}
EOF
[root@master etcd-cert]# ls
ca-config.json  etcd-cert.sh

7. Create the CA certificate signing request
[root@master etcd-cert]# cat > ca-csr.json <<EOF			## CA signing request
{
    "CN": "etcd CA",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Nanjing",
            "ST": "Nanjing"
        }
    ]
}
EOF
[root@master etcd-cert]# ls
ca-config.json  ca-csr.json  etcd-cert.sh

8. Generate the CA certificate
[root@master etcd-cert]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -		
2020/11/18 22:16:37 [INFO] generating a new CA key and certificate from CSR
2020/11/18 22:16:37 [INFO] generate received request
2020/11/18 22:16:37 [INFO] received CSR
2020/11/18 22:16:37 [INFO] generating key: rsa-2048
2020/11/18 22:16:37 [INFO] encoded CSR
2020/11/18 22:16:37 [INFO] signed certificate with serial number 343123254687290934788414439626763995945456
[root@master etcd-cert]# ls
ca-config.json  ca.csr  ca-csr.json  ca-key.pem  ca.pem  etcd-cert.sh
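
Before issuing any server certificates it is worth sanity-checking the new CA. An optional verification (not part of the original flow) with the cfssl-certinfo tool installed earlier, or plain openssl:

[root@master etcd-cert]# cfssl-certinfo -cert ca.pem		## prints subject, validity window, serial number
[root@master etcd-cert]# openssl x509 -in ca.pem -noout -subject -dates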

9. Specify the three etcd node addresses for peer communication (server CSR)
[root@master etcd-cert]# cat > server-csr.json <<EOF
{
    "CN": "etcd",
    "hosts": [
    "20.0.0.101",
    "20.0.0.102",
    "20.0.0.103"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "NanJing",
            "ST": "NanJing"
        }
    ]
}
EOF

10. Generate the etcd server certificate (server.pem, server-key.pem)
[root@master etcd-cert]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server
2020/11/18 22:26:57 [INFO] generate received request
2020/11/18 22:26:57 [INFO] received CSR
2020/11/18 22:26:57 [INFO] generating key: rsa-2048
2020/11/18 22:26:57 [INFO] encoded CSR
2020/11/18 22:26:57 [INFO] signed certificate with serial number 373807666387324040512190709618027818612657599815
2020/11/18 22:26:57 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@master etcd-cert]# ls
ca-config.json  ca-csr.json  ca.pem        server.csr       server-key.pem
ca.csr          ca-key.pem   etcd-cert.sh  server-csr.json  server.pem

11. Upload the binary packages
[root@master etcd-cert]# cd ..
[root@master k8s]# rz -E		## upload the three packages (etcd binaries: https://github.com/etcd-io/etcd/releases)
rz waiting to receive.
[root@master k8s]# ls
cfssl.sh   etcd.sh                          flannel-v0.10.0-linux-amd64.tar.gz
etcd-cert  etcd-v3.3.10-linux-amd64.tar.gz  kubernetes-server-linux-amd64.tar.gz

12. Extract the etcd tarball
[root@master k8s]# tar zxvf etcd-v3.3.10-linux-amd64.tar.gz
[root@master k8s]# ls
cfssl.sh   etcd.sh                   etcd-v3.3.10-linux-amd64.tar.gz     kubernetes-server-linux-amd64.tar.gz
etcd-cert  etcd-v3.3.10-linux-amd64  flannel-v0.10.0-linux-amd64.tar.gz
[root@master k8s]# cd etcd-v3.3.10-linux-amd64/
[root@master etcd-v3.3.10-linux-amd64]# ls
Documentation  etcd  etcdctl  README-etcdctl.md  README.md  READMEv2-etcdctl.md

13. Create the bin, cfg, and ssl directories under /opt/etcd/
[root@master etcd-v3.3.10-linux-amd64]# mkdir -p /opt/etcd/{cfg,bin,ssl}
[root@master etcd-v3.3.10-linux-amd64]# ls /opt/etcd/
bin  cfg  ssl

14. Install the binaries and copy the certificates
[root@master etcd-v3.3.10-linux-amd64]# mv etcd etcdctl /opt/etcd/bin/		
[root@master etcd-v3.3.10-linux-amd64]# cd ..
[root@master k8s]# cp etcd-cert/*.pem /opt/etcd/ssl/
[root@master k8s]# ls /opt/etcd/bin/
etcd  etcdctl
[root@master k8s]# ls /opt/etcd/ssl/
ca-key.pem  ca.pem  server-key.pem  server.pem

15. Run the etcd startup script
[root@master k8s]# bash etcd.sh etcd01 20.0.0.101 etcd02=https://20.0.0.102:2380,etcd03=https://20.0.0.103:2380		## the script blocks here, waiting for the other nodes to join
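
For reference, etcd.sh takes the member name, its IP, and the other peers as arguments, renders /opt/etcd/cfg/etcd plus an etcd.service unit, and starts the service. A condensed sketch of what it does, reconstructed from the config files and ps output shown in this walkthrough (the real script may differ in detail):

#!/bin/bash
## Usage: etcd.sh <name> <ip> <peer-list>
##   e.g. etcd.sh etcd01 20.0.0.101 etcd02=https://20.0.0.102:2380,etcd03=https://20.0.0.103:2380
ETCD_NAME=$1
ETCD_IP=$2
ETCD_CLUSTER=$3

## Render the config file that node01/node02 later edit by hand
cat > /opt/etcd/cfg/etcd <<EOF
#[Member]
ETCD_NAME="${ETCD_NAME}"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://${ETCD_IP}:2380"
ETCD_LISTEN_CLIENT_URLS="https://${ETCD_IP}:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://${ETCD_IP}:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://${ETCD_IP}:2379"
ETCD_INITIAL_CLUSTER="${ETCD_NAME}=https://${ETCD_IP}:2380,${ETCD_CLUSTER}"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

## Write /usr/lib/systemd/system/etcd.service with the TLS flags seen in the
## ps output below, then start the service
systemctl daemon-reload
systemctl enable etcd
systemctl restart etcd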

## In a second terminal session ##
1. Verify that the etcd process is running
[root@master ~]# ps -ef | grep etcd
root       5683      1  4 23:00 ?        00:00:01 /opt/etcd/bin/etcd --name=etcd01 --data-dir=/var/lib/etcd/default.etcd --listen-peer-urls=https://20.0.0.101:2380 --listen-client-urls=https://20.0.0.101:2379,http://127.0.0.1:2379 --advertise-client-urls=https://20.0.0.101:2379 --initial-advertise-peer-urls=https://20.0.0.101:2380 --initial-cluster=etcd01=https://20.0.0.101:2380,etcd02=https://20.0.0.102:2380,etcd03=https://20.0.0.103:2380 --initial-cluster-token=etcd-cluster --initial-cluster-state=new --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --peer-cert-file=/opt/etcd/ssl/server.pem --peer-key-file=/opt/etcd/ssl/server-key.pem --trusted-ca-file=/opt/etcd/ssl/ca.pem --peer-trusted-ca-file=/opt/etcd/ssl/ca.pem
root       5708   5562  0 23:01 pts/0    00:00:00 grep --color=auto etcd

2. Copy the /opt/etcd directory (certificates, binaries, config) to the other nodes
[root@master ~]# scp -r /opt/etcd/ root@20.0.0.102:/opt/		
[root@master ~]# scp -r /opt/etcd/ root@20.0.0.103:/opt/

3. Copy the systemd unit file to the other nodes
[root@master ~]# scp /usr/lib/systemd/system/etcd.service root@20.0.0.102:/usr/lib/systemd/system/
[root@master ~]# scp /usr/lib/systemd/system/etcd.service root@20.0.0.103:/usr/lib/systemd/system/

## On node01 ##
1. Set the hostname
[root@localhost ~]# hostnamectl set-hostname node01
[root@localhost ~]# su

2. Disable the firewall and SELinux
[root@node01 ~]# systemctl stop firewalld
[root@node01 ~]# setenforce 0

3. Edit the etcd configuration file
[root@node01 system]# cd /opt/etcd/cfg/
[root@node01 cfg]# vim etcd
#[Member]
ETCD_NAME="etcd02"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://20.0.0.102:2380"
ETCD_LISTEN_CLIENT_URLS="https://20.0.0.102:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://20.0.0.102:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://20.0.0.102:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://20.0.0.101:2380,etcd02=https://20.0.0.102:2380,etcd03=https://20.0.0.103:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"



## On node02 ##
1. Set the hostname
[root@localhost ~]# hostnamectl set-hostname node02
[root@localhost ~]# su

2. Disable the firewall and SELinux
[root@node02 ~]# systemctl stop firewalld
[root@node02 ~]# setenforce 0

3. Edit the etcd configuration file
[root@node02 ~]# cd /opt/etcd/cfg/
[root@node02 cfg]# vim etcd 
#[Member]
ETCD_NAME="etcd03"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://20.0.0.103:2380"
ETCD_LISTEN_CLIENT_URLS="https://20.0.0.103:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://20.0.0.103:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://20.0.0.103:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://20.0.0.101:2380,etcd02=https://20.0.0.102:2380,etcd03=https://20.0.0.103:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"


## On master ##
1. Run the etcd startup script again (the earlier run timed out and exited)
[root@master k8s]# bash etcd.sh etcd01 20.0.0.101 etcd02=https://20.0.0.102:2380,etcd03=https://20.0.0.103:2380

## On node01 ##
1. Start the etcd service
[root@node01 cfg]# systemctl start etcd.service

## On node02 ##
1. Start the etcd service
[root@node02 cfg]# systemctl start etcd.service 

## On master ##
1. Check the cluster health
[root@master k8s]# cd etcd-cert/
[root@master etcd-cert]# /opt/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://20.0.0.101:2379,https://20.0.0.102:2379,https://20.0.0.103:2379" cluster-health
member 5524f29b260d7efb is healthy: got healthy result from https://20.0.0.103:2379
member a7c6cedb7bf4af4d is healthy: got healthy result from https://20.0.0.102:2379
member c57213b503e56d00 is healthy: got healthy result from https://20.0.0.101:2379
cluster is healthy
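
With all three members healthy, an optional read/write round trip confirms that client traffic works over TLS (etcdctl v2 syntax, matching the cluster-health call above; /probe is an arbitrary test key of my choosing):

[root@master etcd-cert]# /opt/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://20.0.0.101:2379" set /probe ok
[root@master etcd-cert]# /opt/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://20.0.0.102:2379" get /probe
[root@master etcd-cert]# /opt/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://20.0.0.103:2379" rm /probe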

1.3.2 Deploying the Docker Engine

Deploy the Docker engine on every node.
See my earlier blog posts for the relevant commands.

1.3.3 Configuring the Flannel Network

## On master ##
1. Write the allocated pod network into etcd for flannel to use
[root@master etcd-cert]# /opt/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://20.0.0.101:2379,https://20.0.0.102:2379,https://20.0.0.103:2379" set /coreos.com/network/config '{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}'
{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}

2. Read back the stored configuration (note that `get` takes only the key, without the JSON payload)
[root@master etcd-cert]# /opt/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://20.0.0.101:2379,https://20.0.0.102:2379,https://20.0.0.103:2379" get /coreos.com/network/config
{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}


## On every node ##
node01 is shown as the reference below.

1. Upload the flannel package
[root@node01 ~]# rz -E	
rz waiting to receive.
[root@node01 ~]# ls
anaconda-ks.cfg                     initial-setup-ks.cfg  模板  图片  下载  桌面
flannel-v0.10.0-linux-amd64.tar.gz  公共                  视频  文档  音乐

2. Extract it
[root@node01 ~]# tar zxvf flannel-v0.10.0-linux-amd64.tar.gz 
flanneld
mk-docker-opts.sh
README.md

3. Create the k8s working directories and move the binaries into place
[root@node01 ~]# mkdir -p /opt/kubernetes/{cfg,bin,ssl}
[root@node01 ~]# mv mk-docker-opts.sh flanneld /opt/kubernetes/bin/

4. Upload the flannel.sh script
[root@node02 ~]# rz -E
rz waiting to receive.
[root@node02 ~]# ls
anaconda-ks.cfg  flannel-v0.10.0-linux-amd64.tar.gz  README.md  模板  图片  下载  桌面
flannel.sh       initial-setup-ks.cfg                公共       视频  文档  音乐

5. Enable the flannel network
[root@node01 ~]# bash flannel.sh https://20.0.0.101:2379,https://20.0.0.102:2379,https://20.0.0.103:2379
Created symlink from /etc/systemd/system/multi-user.target.wants/flanneld.service to /usr/lib/systemd/system/flanneld.service.


6. Configure Docker to use flannel (in the [Service] section, add the EnvironmentFile line and insert $DOCKER_NETWORK_OPTIONS into ExecStart)
[root@node01 ~]# vim /usr/lib/systemd/system/docker.service
[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
EnvironmentFile=/run/flannel/subnet.env
ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS -H fd:// --containerd=/run/containerd/containerd.sock
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
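
The EnvironmentFile referenced above is written by mk-docker-opts.sh, which flannel.sh runs alongside flanneld. On this node it would look roughly like the following (illustrative; the --bip value matches the subnet flannel leased, as seen in the ifconfig output below):

## contents of /run/flannel/subnet.env (illustrative)
DOCKER_NETWORK_OPTIONS=" --bip=172.17.51.1/24 --ip-masq=false --mtu=1450"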

7. Check the network interfaces
[root@node01 ~]# ifconfig 
docker0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500
        inet 172.17.0.1  netmask 255.255.0.0  broadcast 172.17.255.255
        ether 02:42:28:98:7f:84  txqueuelen 0  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 20.0.0.102  netmask 255.255.255.0  broadcast 20.0.0.255
        inet6 fe80::fe29:c7a4:fda8:5c23  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:3b:9f:ce  txqueuelen 1000  (Ethernet)
        RX packets 746307  bytes 918528183 (875.9 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 261063  bytes 23835901 (22.7 MiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

flannel.1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
        inet 172.17.51.0  netmask 255.255.255.255  broadcast 0.0.0.0
        inet6 fe80::581f:5dff:fee6:f68e  prefixlen 64  scopeid 0x20<link>
        ether 5a:1f:5d:e6:f6:8e  txqueuelen 0  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 27 overruns 0  carrier 0  collisions 0

8. Restart the Docker service
[root@node01 ~]# systemctl daemon-reload 
[root@node01 ~]# systemctl restart docker

9. Verify that the docker0 address has changed
[root@node01 ~]# ifconfig 
docker0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500
        inet 172.17.51.1  netmask 255.255.255.0  broadcast 172.17.51.255
        ether 02:42:28:98:7f:84  txqueuelen 0  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 20.0.0.102  netmask 255.255.255.0  broadcast 20.0.0.255
        inet6 fe80::fe29:c7a4:fda8:5c23  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:3b:9f:ce  txqueuelen 1000  (Ethernet)
        RX packets 786497  bytes 964790021 (920.0 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 275995  bytes 25384674 (24.2 MiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

flannel.1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
        inet 172.17.51.0  netmask 255.255.255.255  broadcast 0.0.0.0
        inet6 fe80::581f:5dff:fee6:f68e  prefixlen 64  scopeid 0x20<link>
        ether 5a:1f:5d:e6:f6:8e  txqueuelen 0  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 27 overruns 0  carrier 0  collisions 0

10. Ping a container on the other node's docker0 subnet to prove that flannel provides routing
[root@node01 ~]# docker run -it centos:7 /bin/bash
[root@de3d5b7fd1a6 /]# yum -y install net-tools
[root@de3d5b7fd1a6 /]# ifconfig 
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
        inet 172.17.51.2  netmask 255.255.255.0  broadcast 172.17.51.255
        ether 02:42:ac:11:33:02  txqueuelen 0  (Ethernet)
        RX packets 14736  bytes 11501066 (10.9 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 4972  bytes 271904 (265.5 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        loop  txqueuelen 1  (Local Loopback)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
##the test container's IP on node02 is 172.17.95.2

[root@de3d5b7fd1a6 /]# ping 172.17.95.2
PING 172.17.95.2 (172.17.95.2) 56(84) bytes of data.
64 bytes from 172.17.95.2: icmp_seq=1 ttl=62 time=0.593 ms
64 bytes from 172.17.95.2: icmp_seq=2 ttl=62 time=0.497 ms
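
The routing that makes this work is visible on the host: flannel installs one route per remote pod subnet through the flannel.1 vxlan device. An optional check (the output shape is illustrative; the subnets depend on flannel's leases):

[root@node01 ~]# ip route show | grep flannel.1
## expected shape: 172.17.95.0/24 via 172.17.95.0 dev flannel.1 onlink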

1.3.4 Deploying the Master

## On master ##
1. Upload master.zip and edit the k8s-cert.sh script
[root@master k8s]# rz -E		## upload the master.zip archive
rz waiting to receive.
[root@master k8s]# ls
cfssl.sh   etcd-v3.3.10-linux-amd64            kubernetes-server-linux-amd64.tar.gz
etcd-cert  etcd-v3.3.10-linux-amd64.tar.gz     master.zip
etcd.sh    flannel-v0.10.0-linux-amd64.tar.gz

[root@master k8s]# unzip master.zip 
[root@master k8s]# mkdir -p /opt/kubernetes/{cfg,bin,ssl}
[root@master k8s]# mkdir k8s-cert
[root@master k8s]# cd k8s-cert/
[root@master k8s-cert]# rz -E
rz waiting to receive.
[root@master k8s-cert]# ls
k8s-cert.sh

[root@master k8s-cert]# vim k8s-cert.sh		## note: the ## annotations inside the JSON below are explanatory only; JSON allows no comments, so they must not remain in the actual file
#-----------------------

cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF

cat > ca-csr.json <<EOF
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Nanjing",
            "ST": "Nanjing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

#-----------------------

cat > server-csr.json <<EOF
{
    "CN": "kubernetes",
    "hosts": [
      "10.0.0.1",
      "127.0.0.1",
      "20.0.0.101",		## master01
      "20.0.0.104",		## master02 (reserved for multi-master)
      "20.0.0.100",		## vip
      "20.0.0.105",		## nginx lb
      "20.0.0.106",		## nginx lb
      "kubernetes",
      "kubernetes.default",
      "kubernetes.default.svc",
      "kubernetes.default.svc.cluster",
      "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "NanJing",
            "ST": "NanJing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server

#-----------------------

cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "NanJing",
      "ST": "NanJing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin

#-----------------------

cat > kube-proxy-csr.json <<EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "NanJing",
      "ST": "NanJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

2. Generate the certificates
[root@master k8s-cert]# bash k8s-cert.sh
[root@master k8s-cert]# ls *pem
admin-key.pem  ca-key.pem  kube-proxy-key.pem  server-key.pem
admin.pem      ca.pem      kube-proxy.pem      server.pem

3. Copy the CA and server certificates into the working directory
[root@master k8s-cert]# cp ca*pem server*pem /opt/kubernetes/ssl/

4. Extract the Kubernetes server tarball
[root@master k8s-cert]# cd ..
[root@master k8s]# rz -E		## upload kubernetes-server-linux-amd64.tar.gz.0
rz waiting to receive.
[root@master k8s]# ls
apiserver.sh                     flannel-v0.10.0-linux-amd64.tar.gz
cfssl.sh                         k8s-cert
controller-manager.sh            kubernetes-server-linux-amd64.tar.gz
etcd-cert                        kubernetes-server-linux-amd64.tar.gz.0
etcd.sh                          master.zip
etcd-v3.3.10-linux-amd64         scheduler.sh
etcd-v3.3.10-linux-amd64.tar.gz
[root@master k8s]# tar zxvf kubernetes-server-linux-amd64.tar.gz.0
[root@master k8s]# cd /root/k8s/kubernetes/server/bin/

5. Copy the key binaries into place
[root@master bin]# cp kube-apiserver kubectl kube-controller-manager kube-scheduler /opt/kubernetes/bin/
[root@master bin]# cd /root/k8s/

6. Generate a random token
[root@master k8s]# head -c 16 /dev/urandom | od -An -t x | tr -d ' '
0e06b7e29b2cdbd9e71d69f8124c02a4

7. Create the token.csv file
[root@master k8s]# vim /opt/kubernetes/cfg/token.csv
0e06b7e29b2cdbd9e71d69f8124c02a4,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
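
Steps 6 and 7 can also be done in one short snippet, mirroring the commented-out logic that appears later in the kubeconfig script (the csv format is token,user,uid,"group"):

BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat > /opt/kubernetes/cfg/token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF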

8. Start the apiserver
[root@master k8s]# bash apiserver.sh 20.0.0.101 https://20.0.0.101:2379,https://20.0.0.102:2379,https://20.0.0.103:2379

9. Check that the process started successfully
[root@master k8s]# ps aux | grep kube

10. Review the generated configuration file
[root@master k8s]# cat /opt/kubernetes/cfg/kube-apiserver 

KUBE_APISERVER_OPTS="--logtostderr=true \
--v=4 \
--etcd-servers=https://20.0.0.101:2379,https://20.0.0.102:2379,https://20.0.0.103:2379 \
--bind-address=20.0.0.101 \
--secure-port=6443 \
--advertise-address=20.0.0.101 \
--allow-privileged=true \
--service-cluster-ip-range=10.0.0.0/24 \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
--authorization-mode=RBAC,Node \
--kubelet-https=true \
--enable-bootstrap-token-auth \
--token-auth-file=/opt/kubernetes/cfg/token.csv \
--service-node-port-range=30000-50000 \
--tls-cert-file=/opt/kubernetes/ssl/server.pem  \
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \
--client-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/opt/etcd/ssl/ca.pem \
--etcd-certfile=/opt/etcd/ssl/server.pem \
--etcd-keyfile=/opt/etcd/ssl/server-key.pem"

11. Check the listening ports
[root@master k8s]# netstat -ntap | grep 6443
tcp        0      0 20.0.0.101:6443         0.0.0.0:*               LISTEN      8735/kube-apiserver 
tcp        0      0 20.0.0.101:50478        20.0.0.101:6443         ESTABLISHED 8735/kube-apiserver 
tcp        0      0 20.0.0.101:6443         20.0.0.101:50478        ESTABLISHED 8735/kube-apiserver 
[root@master k8s]# netstat -ntap | grep 8080
tcp        0      0 127.0.0.1:8080          0.0.0.0:*               LISTEN      8735/kube-apiserver 
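
Since the insecure port 8080 listens only on 127.0.0.1, a quick local probe (optional) confirms that the apiserver is answering requests:

[root@master k8s]# curl http://127.0.0.1:8080/version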

12. Start the scheduler service
[root@master k8s]# ./scheduler.sh 127.0.0.1
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-scheduler.service to /usr/lib/systemd/system/kube-scheduler.service.

13. Check the processes
[root@master k8s]# ps aux | grep ku

14. Make controller-manager.sh executable and start controller-manager
[root@master k8s]# chmod +x controller-manager.sh 
[root@master k8s]# ./controller-manager.sh 127.0.0.1

15. Check the status of the master components
[root@master k8s]# /opt/kubernetes/bin/kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok                  
controller-manager   Healthy   ok                  
etcd-1               Healthy   {"health":"true"}   
etcd-2               Healthy   {"health":"true"}   
etcd-0               Healthy   {"health":"true"} 


1.3.5 Deploying node01

## On master ##
1. Copy the kubelet and kube-proxy binaries to the nodes
[root@master k8s]# cd /root/k8s/kubernetes/server/bin
[root@master bin]# scp kubelet kube-proxy root@20.0.0.102:/opt/kubernetes/bin/
[root@master bin]# scp kubelet kube-proxy root@20.0.0.103:/opt/kubernetes/bin/


## On node01 ##
1. Upload node.zip and extract it to obtain the kubelet.sh and proxy.sh scripts
[root@node01 ~]# ls		## after uploading node.zip
anaconda-ks.cfg                       公共  文档
flannel.sh                          node.zip    模板  下载
flannel-v0.10.0-linux-amd64.tar.gz    视频  音乐
initial-setup-ks.cfg                README.md   图片  桌面
[root@node01 ~]# unzip node.zip
[root@node01 ~]# ls	
anaconda-ks.cfg                     kubelet.sh  公共  文档
flannel.sh                          node.zip    模板  下载
flannel-v0.10.0-linux-amd64.tar.gz  proxy.sh    视频  音乐
initial-setup-ks.cfg                README.md   图片  桌面


## On master ##
1. Create the kubeconfig directory and upload the kubeconfig.sh script
[root@master bin]# cd /root/k8s/
[root@master k8s]# mkdir kubeconfig/
[root@master k8s]# cd kubeconfig/		## upload kubeconfig.sh here
[root@master kubeconfig]# mv kubeconfig.sh kubeconfig 	## rename it

2. Read the token from token.csv
[root@master kubeconfig]# cat /opt/kubernetes/cfg/token.csv 
0e06b7e29b2cdbd9e71d69f8124c02a4,kubelet-bootstrap,10001,"system:kubelet-bootstrap"

3. Edit the kubeconfig script
[root@master kubeconfig]# vim kubeconfig 
## delete the token-generation section below (e.g. 10dd in vim); the token already exists in token.csv
# Create the TLS bootstrapping token
#BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
BOOTSTRAP_TOKEN=0fb61c46f8991b718eb38d27b605b008

cat > token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF

# Set client authentication parameters
kubectl config set-credentials kubelet-bootstrap \
  --token=0e06b7e29b2cdbd9e71d69f8124c02a4 \		## replace with the token from your token.csv
  --kubeconfig=bootstrap.kubeconfig
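
For context, this is roughly how the script then assembles bootstrap.kubeconfig from four kubectl config calls (a condensed sketch using the paths from this walkthrough; kube-proxy.kubeconfig is built the same way, with the kube-proxy certificates instead of the token):

APISERVER=20.0.0.101
SSL_DIR=/root/k8s/k8s-cert

## Cluster parameters: trust our CA and target the secure port
kubectl config set-cluster kubernetes \
  --certificate-authority=$SSL_DIR/ca.pem \
  --embed-certs=true \
  --server=https://$APISERVER:6443 \
  --kubeconfig=bootstrap.kubeconfig

## Client credentials: the shared bootstrap token from token.csv
kubectl config set-credentials kubelet-bootstrap \
  --token=0e06b7e29b2cdbd9e71d69f8124c02a4 \
  --kubeconfig=bootstrap.kubeconfig

## Bind user and cluster together in a default context and select it
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=bootstrap.kubeconfig
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig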

4. Add /opt/kubernetes/bin to the PATH
[root@master kubeconfig]#  export PATH=$PATH:/opt/kubernetes/bin/

5. Check the status of the master components
[root@master kubeconfig]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok                  
controller-manager   Healthy   ok                  
etcd-0               Healthy   {"health":"true"}   
etcd-2               Healthy   {"health":"true"}   
etcd-1               Healthy   {"health":"true"}  

6. Generate the kubeconfig files
[root@master kubeconfig]# bash kubeconfig 20.0.0.101 /root/k8s/k8s-cert/
Cluster "kubernetes" set.
User "kubelet-bootstrap" set.
Context "default" created.
Switched to context "default".
Cluster "kubernetes" set.
User "kube-proxy" set.
Context "default" created.
Switched to context "default".
[root@master kubeconfig]# ls
bootstrap.kubeconfig  kubeconfig  kube-proxy.kubeconfig

7. Copy the kubeconfig files to the nodes
[root@master kubeconfig]# scp bootstrap.kubeconfig kube-proxy.kubeconfig root@20.0.0.102:/opt/kubernetes/cfg/
[root@master kubeconfig]# scp bootstrap.kubeconfig kube-proxy.kubeconfig root@20.0.0.103:/opt/kubernetes/cfg/

8. Create the bootstrap role binding that lets kubelets request certificate signing from the apiserver
[root@master kubeconfig]# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
clusterrolebinding.rbac.authorization.k8s.io/kubelet-bootstrap created
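
Optionally verify the binding; any kubelet presenting the bootstrap token now acts through the system:node-bootstrapper role:

[root@master kubeconfig]# kubectl describe clusterrolebinding kubelet-bootstrap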


## On node01 ##
1. Start the kubelet
[root@node01 ~]# bash kubelet.sh 20.0.0.102
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.

[root@node01 ~]# ps aux | grep kube
root      72371  1.7  2.3 462636 44052 ?        Ssl  16:32   0:00 /opt/kubernetes/bin/kubelet --logtostderr=true --v=4 --hostname-override=20.0.0.102 --kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig --bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig --config=/opt/kubernetes/cfg/kubelet.config --cert-dir=/opt/kubernetes/ssl --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0
root      72393  0.0  0.0 112676   984 pts/1    S+   16:32   0:00 grep --color=auto kube

[root@node01 ~]# systemctl status kubelet.service 


## On master ##
1. Check for node01's certificate signing request
[root@master kubeconfig]# kubectl get csr
NAME                                                   AGE    REQUESTOR           CONDITION
node-csr-jrMTiwW__xVya1IC6GlUyFGpl8g34YMy3z_xrZiFscA   108s   kubelet-bootstrap   Pending   (waiting for the cluster to issue this node's certificate)

2. Approve the request to join the cluster
[root@master kubeconfig]# kubectl certificate approve node-csr-jrMTiwW__xVya1IC6GlUyFGpl8g34YMy3z_xrZiFscA
certificatesigningrequest.certificates.k8s.io/node-csr-jrMTiwW__xVya1IC6GlUyFGpl8g34YMy3z_xrZiFscA approved

3. Check the certificate status again
[root@master kubeconfig]# kubectl get csr
NAME                                                   AGE     REQUESTOR           CONDITION
node-csr-jrMTiwW__xVya1IC6GlUyFGpl8g34YMy3z_xrZiFscA   2m45s   kubelet-bootstrap   Approved,Issued   (the node has been admitted to the cluster)

4. List the cluster nodes; node01 has joined successfully
[root@master kubeconfig]# kubectl get nodes
NAME         STATUS   ROLES    AGE   VERSION
20.0.0.102   Ready    <none>   88s   v1.12.3


## On node01 ##
1. Start the kube-proxy service
[root@node01 ~]# bash proxy.sh 20.0.0.102
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-proxy.service to /usr/lib/systemd/system/kube-proxy.service.
[root@node01 ~]# systemctl status kube-proxy.service 

1.3.6 Deploying node02

## On node01 ##
1. Copy the existing /opt/kubernetes directory to node02 and adjust it there
[root@node01 ~]# scp -r /opt/kubernetes/ root@20.0.0.103:/opt/
The authenticity of host '20.0.0.103 (20.0.0.103)' can't be established.
ECDSA key fingerprint is SHA256:VRQQjBblsepX07JhOVfCoYpsNpLFhL1IOnxV8egVqeM.
ECDSA key fingerprint is MD5:3f:57:d7:20:c8:3f:8d:91:2b:5a:43:e5:49:6c:94:25.
Are you sure you want to continue connecting (yes/no)? yes	
Warning: Permanently added '20.0.0.103' (ECDSA) to the list of known hosts.
root@20.0.0.103's password: 
flanneld                                           100%  226    81.9KB/s   00:00    
bootstrap.kubeconfig                               100% 2164     2.3MB/s   00:00    
kube-proxy.kubeconfig                              100% 6266    11.0MB/s   00:00    
kubelet                                            100%  374   482.2KB/s   00:00    
kubelet.config                                     100%  264   358.9KB/s   00:00    
kubelet.kubeconfig                                 100% 2293     1.4MB/s   00:00    
kube-proxy                                         100%  186   334.8KB/s   00:00    
mk-docker-opts.sh                                  100% 2139     1.9MB/s   00:00    
flanneld                                           100%   35MB 120.5MB/s   00:00    
kubelet                                            100%  168MB 137.2MB/s   00:01    
kube-proxy                                         100%   48MB 124.4MB/s   00:00    
kubelet.crt                                        100% 2169     2.2MB/s   00:00    
kubelet.key                                        100% 1675     2.1MB/s   00:00    
kubelet-client-2020-11-23-16-35-00.pem             100% 1269     1.9MB/s   00:00    
kubelet-client-current.pem                         100% 1269     2.4MB/s   00:00 

2. Copy the kubelet and kube-proxy service files to node02
[root@node01 ~]# scp /usr/lib/systemd/system/{kubelet,kube-proxy}.service root@20.0.0.103:/usr/lib/systemd/system/
root@20.0.0.103's password: 
kubelet.service                                    100%  264   127.4KB/s   00:00    
kube-proxy.service                                 100%  231   297.6KB/s   00:00  


## On node02 ##
1. Edit the three configuration files: kubelet, kubelet.config, and kube-proxy
[root@node02 ssl]# cd ../cfg/
[root@node02 cfg]# ls
bootstrap.kubeconfig  kubelet         kubelet.kubeconfig  kube-proxy.kubeconfig
flanneld              kubelet.config  kube-proxy

[root@node02 cfg]# vim kubelet
KUBELET_OPTS="--logtostderr=true \
--v=4 \
--hostname-override=20.0.0.103 \
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \
--config=/opt/kubernetes/cfg/kubelet.config \
--cert-dir=/opt/kubernetes/ssl \
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"

[root@node02 cfg]# vim kubelet.config 
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 20.0.0.103
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
- 10.0.0.2
clusterDomain: cluster.local.
failSwapOn: false
authentication:
  anonymous:
    enabled: true

[root@node02 cfg]# vim kube-proxy
KUBE_PROXY_OPTS="--logtostderr=true \
--v=4 \
--hostname-override=20.0.0.103 \
--cluster-cidr=10.0.0.0/24 \
--proxy-mode=ipvs \
--kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig"
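
Instead of editing the three files by hand, the copied configs can also be fixed with a single substitution (a convenience sketch; it rewrites every occurrence of node01's IP, so double-check the result with grep):

[root@node02 cfg]# sed -i 's/20\.0\.0\.102/20.0.0.103/g' kubelet kubelet.config kube-proxy
[root@node02 cfg]# grep -n '20\.0\.0\.103' kubelet kubelet.config kube-proxy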

2. Delete the certificates copied over from node01 (each kubelet must request its own)
[root@node02 cfg]# cd ../ssl/
[root@node02 ssl]# ls
kubelet-client-2020-11-23-16-35-00.pem  kubelet.crt
kubelet-client-current.pem              kubelet.key
[root@node02 ssl]# rm -rf *
[root@node02 ssl]# ls

3. Start the kubelet and kube-proxy services; the kubelet requests its own certificate
[root@node02 ssl]# systemctl start kubelet.service 
[root@node02 ssl]# systemctl enable kubelet.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
[root@node02 ssl]# systemctl start kube-proxy.service
[root@node02 ssl]# systemctl enable kube-proxy.service
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-proxy.service to /usr/lib/systemd/system/kube-proxy.service.


## On master ##
1. Check node02's certificate signing request
[root@master kubeconfig]# kubectl get csr
NAME                                                   AGE   REQUESTOR           CONDITION
node-csr-FhNT5zT9Td5VPKxNlnop9siyqFabWKB3h8nj0GMapAo   24s   kubelet-bootstrap   Pending
node-csr-jrMTiwW__xVya1IC6GlUyFGpl8g34YMy3z_xrZiFscA   14m   kubelet-bootstrap   Approved,Issued

2. Approve the request to join the cluster
[root@master kubeconfig]# kubectl certificate approve node-csr-FhNT5zT9Td5VPKxNlnop9siyqFabWKB3h8nj0GMapAo
certificatesigningrequest.certificates.k8s.io/node-csr-FhNT5zT9Td5VPKxNlnop9siyqFabWKB3h8nj0GMapAo approved

3. Check node02's request status again
[root@master kubeconfig]# kubectl get csr
NAME                                                   AGE   REQUESTOR           CONDITION
node-csr-FhNT5zT9Td5VPKxNlnop9siyqFabWKB3h8nj0GMapAo   82s   kubelet-bootstrap   Approved,Issued
node-csr-jrMTiwW__xVya1IC6GlUyFGpl8g34YMy3z_xrZiFscA   15m   kubelet-bootstrap   Approved,Issued

4. List the cluster nodes
[root@master kubeconfig]# kubectl get nodes
NAME         STATUS   ROLES    AGE   VERSION
20.0.0.102   Ready    <none>   13m   v1.12.3
20.0.0.103   Ready    <none>   20s   v1.12.3