Kubernetes binary deployment lab: single-master and multi-master

Kubernetes releases: https://github.com/kubernetes/kubernetes/releases?after=v1.13.1
etcd binary releases: https://github.com/etcd-io/etcd/releases

I. Lab environment

Master nodes
master01: 192.168.130.10/24
master02: 192.168.130.40/24
Install: kube-apiserver kube-controller-manager kube-scheduler etcd
Node (worker) nodes
node01: 192.168.130.20/24
node02: 192.168.130.30/24
Install: kubelet kube-proxy docker flannel etcd
Load balancers
Nginx01: 192.168.130.50/24
Nginx02: 192.168.130.60/24
Install: nginx keepalived

II. Procedure

1. Single-master deployment

On the master:

First, generate the certificates and set up the etcd cluster.
[root@localhost ~]# mkdir k8s
[root@localhost ~]# cd k8s/
[root@localhost k8s]# mkdir etcd-cert
[root@localhost k8s]# ls
etcd-cert
//script to download the certificate tools
[root@localhost k8s]# vim cfssl.sh
curl -L https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -o /usr/local/bin/cfssl
curl -L https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -o /usr/local/bin/cfssljson
curl -L https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -o /usr/local/bin/cfssl-certinfo
[root@localhost k8s]# bash cfssl.sh    ##run the script to download the official cfssl binaries
[root@localhost k8s]# chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson /usr/local/bin/cfssl-certinfo
[root@localhost k8s]# ls /usr/local/bin/
cfssl  cfssl-certinfo  cfssljson   ##the three certificate tools
Generate the certificates: cfssl issues certificates, cfssljson takes JSON input and writes out the certificate files, and cfssl-certinfo displays certificate information.
[root@localhost etcd-cert]# vim etcd-cert.sh   ##script that creates the certificates
##define the CA configuration
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "www": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF
## CA certificate signing request
cat > ca-csr.json <<EOF
{
    "CN": "etcd CA",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing"
        }
    ]
}
EOF

cfssl gencert -initca ca-csr.json | cfssljson -bare ca -  ##generate the CA certificate

#-----------------------
## server certificate used for communication among the three etcd nodes
cat > server-csr.json <<EOF
{
    "CN": "etcd",
    "hosts": [
    "192.168.130.10",
    "192.168.130.20",
    "192.168.130.30"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing"
        }
    ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server  ##generate the etcd server certificate: server-key.pem and server.pem
[root@localhost etcd-cert]# chmod +x etcd-cert.sh 
[root@localhost etcd-cert]# ls
etcd-cert.sh
[root@localhost etcd-cert]# ./etcd-cert.sh 
2020/10/03 13:36:15 [INFO] generating a new CA key and certificate from CSR
2020/10/03 13:36:15 [INFO] generate received request
2020/10/03 13:36:15 [INFO] received CSR
2020/10/03 13:36:15 [INFO] generating key: rsa-2048
2020/10/03 13:36:16 [INFO] encoded CSR
2020/10/03 13:36:16 [INFO] signed certificate with serial number 541559024702405654248587978375305356176324367528
2020/10/03 13:36:16 [INFO] generate received request
2020/10/03 13:36:16 [INFO] received CSR
2020/10/03 13:36:16 [INFO] generating key: rsa-2048
2020/10/03 13:36:16 [INFO] encoded CSR
2020/10/03 13:36:16 [INFO] signed certificate with serial number 272716331039428081077185003130789190715206774318
2020/10/03 13:36:16 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@localhost etcd-cert]# ls   ##the certificates have been generated
ca-config.json  ca-csr.json  ca.pem        server.csr       server-key.pem
ca.csr          ca-key.pem   etcd-cert.sh  server-csr.json  server.pem
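To sanity-check what was just issued, cfssl-certinfo can print the certificate contents; a quick check run in the same etcd-cert directory, for example:
[root@localhost etcd-cert]# cfssl-certinfo -cert server.pem    ##the "sans" field should list 192.168.130.10, 192.168.130.20 and 192.168.130.30, and "not_after" should be roughly ten years out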

Deploy etcd. The etcd release tarball was downloaded beforehand, so it can simply be copied in and used.

[root@localhost k8s]# ls
etcd-cert  etcd.sh  etcd-v3.3.10-linux-amd64.tar.gz
[root@localhost k8s]# mkdir /opt/etcd/{bin,cfg,ssl} -p
[root@localhost k8s]# tar zvxf etcd-v3.3.10-linux-amd64.tar.gz
[root@localhost k8s]# cd etcd-v3.3.10-linux-amd64/
[root@localhost etcd-v3.3.10-linux-amd64]# ls
Documentation  etcd  etcdctl  README-etcdctl.md  README.md  READMEv2-etcdctl.md
[root@localhost etcd-v3.3.10-linux-amd64]# mv etcd etcdctl /opt/etcd/bin/
[root@localhost etcd-v3.3.10-linux-amd64]# cd ../etcd-cert/
[root@localhost etcd-cert]# ls
ca-config.json  ca-csr.json  ca.pem        server.csr       server-key.pem
ca.csr          ca-key.pem   etcd-cert.sh  server-csr.json  server.pem
[root@localhost etcd-cert]# cp *.pem /opt/etcd/ssl/
[root@localhost etcd-cert]# cd ..
[root@localhost k8s]# ls
etcd-cert  etcd.sh  etcd-v3.3.10-linux-amd64  etcd-v3.3.10-linux-amd64.tar.gz
[root@localhost k8s]# vim etcd.sh    ##etcd deployment script; it generates the etcd config file and the systemd unit
#!/bin/bash
# example: ./etcd.sh etcd01 192.168.130.10 etcd02=https://192.168.130.20:2380,etcd03=https://192.168.130.30:2380

ETCD_NAME=$1
ETCD_IP=$2
ETCD_CLUSTER=$3

WORK_DIR=/opt/etcd

cat <<EOF >$WORK_DIR/cfg/etcd
#[Member]
ETCD_NAME="${ETCD_NAME}"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://${ETCD_IP}:2380"
ETCD_LISTEN_CLIENT_URLS="https://${ETCD_IP}:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://${ETCD_IP}:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://${ETCD_IP}:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://${ETCD_IP}:2380,${ETCD_CLUSTER}"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

cat <<EOF >/usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=${WORK_DIR}/cfg/etcd
ExecStart=${WORK_DIR}/bin/etcd \
--name=\${ETCD_NAME} \
--data-dir=\${ETCD_DATA_DIR} \
--listen-peer-urls=\${ETCD_LISTEN_PEER_URLS} \
--listen-client-urls=\${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
--advertise-client-urls=\${ETCD_ADVERTISE_CLIENT_URLS} \
--initial-advertise-peer-urls=\${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
--initial-cluster=\${ETCD_INITIAL_CLUSTER} \
--initial-cluster-token=\${ETCD_INITIAL_CLUSTER_TOKEN} \
--initial-cluster-state=new \
--cert-file=${WORK_DIR}/ssl/server.pem \
--key-file=${WORK_DIR}/ssl/server-key.pem \
--peer-cert-file=${WORK_DIR}/ssl/server.pem \
--peer-key-file=${WORK_DIR}/ssl/server-key.pem \
--trusted-ca-file=${WORK_DIR}/ssl/ca.pem \
--peer-trusted-ca-file=${WORK_DIR}/ssl/ca.pem
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable etcd
systemctl restart etcd
[root@localhost k8s]# bash etcd.sh etcd01 192.168.130.10 etcd02=https://192.168.130.20:2380,etcd03=https://192.168.130.30:2380
Created symlink from /etc/systemd/system/multi-user.target.wants/etcd.service to /usr/lib/systemd/system/etcd.service.
   ##  the command hangs here, waiting for the other nodes to join
   ##  open another session and you will see the etcd process is already running
[root@localhost ~]# ps -ef | grep etcd

Copy the certificates to the other nodes

[root@localhost k8s]# scp -r /opt/etcd/ root@192.168.130.30:/opt/
[root@localhost k8s]# scp -r /opt/etcd/ root@192.168.130.20:/opt
## copy the systemd unit file to the other nodes
[root@localhost k8s]# scp /usr/lib/systemd/system/etcd.service root@192.168.130.20:/usr/lib/systemd/system/
[root@localhost k8s]# scp /usr/lib/systemd/system/etcd.service root@192.168.130.30:/usr/lib/systemd/system/

On node01 and node02, edit the files copied over

[root@localhost ~]# vim /opt/etcd/cfg/etcd

#[Member]
ETCD_NAME="etcd02"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.130.20:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.130.20:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.130.20:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.130.20:2379"
......
##on node02 make the same change, using its own address ending in .30

##start etcd on both node machines
[root@localhost ssl]# systemctl start etcd
[root@localhost ssl]# systemctl status etcd

Check the cluster status on the master

[root@localhost etcd-cert]# /opt/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://192.168.130.10:2379,https://192.168.130.20:2379,https://192.168.130.30:2379" cluster-health
member 3eae9a550e2e3ec is healthy: got healthy result from https://192.168.130.30:2379
member 26cd4dcf17bc5cbd is healthy: got healthy result from https://192.168.130.20:2379
member 2fcd2df8a9411750 is healthy: got healthy result from https://192.168.130.10:2379
cluster is healthy
## all nodes have joined the etcd cluster
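As an optional extra check, the member list can be queried with the same certificates (a sketch using the v2 etcdctl API that etcd v3.3 defaults to):
[root@localhost etcd-cert]# /opt/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://192.168.130.10:2379" member list
## each member should show its name, peerURLs and clientURLs, with exactly one member marked isLeader=true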

Install Docker on both node machines (via yum; that step is not shown here).
Flannel network configuration

##on the master, write the allocated subnet into etcd for flannel to use
[root@localhost etcd-cert]# /opt/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://192.168.130.10:2379,https://192.168.130.20:2379,https://192.168.130.30:2379" set /coreos.com/network/config '{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}'
{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}  ##the allocated network segment
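To confirm the key was stored, it can be read back with the same certificate options (a quick sketch; the output should match the JSON written above):
[root@localhost etcd-cert]# /opt/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://192.168.130.10:2379" get /coreos.com/network/config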

Deploy flannel on the node machines; the steps are the same on both.

[root@192 ~]# ls
 flannel-v0.10.0-linux-amd64.tar.gz  
[root@192 ~]# tar zvxf flannel-v0.10.0-linux-amd64.tar.gz 
flanneld
mk-docker-opts.sh
README.md
[root@192 ~]# ls
 mk-docker-opts.sh   flannel-v0.10.0-linux-amd64.tar.gz  README.md  flanneld       
[root@192 ~]# mkdir /opt/kubernetes/{bin,ssl,cfg} -p   ##create the k8s working directories
[root@192 ~]# mv mk-docker-opts.sh flanneld /opt/kubernetes/bin/
[root@192 ~]# vim flannel.sh   ##flannel deployment script; generates the config file and the systemd unit
#!/bin/bash

ETCD_ENDPOINTS=${1:-"http://127.0.0.1:2379"}

cat <<EOF >/opt/kubernetes/cfg/flanneld

FLANNEL_OPTIONS="--etcd-endpoints=${ETCD_ENDPOINTS} \
-etcd-cafile=/opt/etcd/ssl/ca.pem \
-etcd-certfile=/opt/etcd/ssl/server.pem \
-etcd-keyfile=/opt/etcd/ssl/server-key.pem"

EOF

cat <<EOF >/usr/lib/systemd/system/flanneld.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network-online.target network.target
Before=docker.service

[Service]
Type=notify
EnvironmentFile=/opt/kubernetes/cfg/flanneld
ExecStart=/opt/kubernetes/bin/flanneld --ip-masq \$FLANNEL_OPTIONS 
ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure

[Install]
WantedBy=multi-user.target

EOF

systemctl daemon-reload
systemctl enable flanneld
systemctl restart flanneld

## enable the flannel network
[root@192 ~]# bash flannel.sh https://192.168.130.10:2379,https://192.168.130.20:2379,https://192.168.130.30:2379
Created symlink from /etc/systemd/system/multi-user.target.wants/flanneld.service to /usr/lib/systemd/system/flanneld.service.
## configure Docker to use flannel
[root@localhost ~]# vim /usr/lib/systemd/system/docker.service

[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
EnvironmentFile=/run/flannel/subnet.env   ##add this line
##modify this line
ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS -H fd:// --containerd=/run/containerd/containerd.sock
......
[root@localhost ~]# cat /run/flannel/subnet.env
DOCKER_OPT_BIP="--bip=172.17.42.1/24"
DOCKER_OPT_IPMASQ="--ip-masq=false"
DOCKER_OPT_MTU="--mtu=1450"
//note: bip specifies the subnet Docker uses at startup
DOCKER_NETWORK_OPTIONS=" --bip=172.17.42.1/24 --ip-masq=false --mtu=1450" 
//restart the Docker service
[root@localhost ~]# systemctl daemon-reload
[root@localhost ~]# systemctl restart docker
//check the flannel interface
[root@localhost ~]# ifconfig
flannel.1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
        inet 172.17.84.0  netmask 255.255.255.255  broadcast 0.0.0.0
        inet6 fe80::fc7c:e1ff:fe1d:224  prefixlen 64  scopeid 0x20<link>
        ether fe:7c:e1:1d:02:24  txqueuelen 0  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 26 overruns 0  carrier 0  collisions 0

Start a container on each node, exec into it, and ping the container IP on the other node; the pings should go through (a quick sketch follows below).
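A minimal sketch of that test, assuming the busybox image can be pulled on both nodes (the container names and sample IP are illustrative):
## on node01: start a test container and note its flannel-assigned IP
[root@localhost ~]# docker run -itd --name test01 busybox
[root@localhost ~]# docker exec test01 ip addr    ##e.g. 172.17.84.2
## on node02: start a second container and ping node01's container across the overlay
[root@localhost ~]# docker run -itd --name test02 busybox
[root@localhost ~]# docker exec test02 ping -c 3 172.17.84.2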
Next, deploy the remaining components.
Deploy the master components

//on the master, generate the certificates for the apiserver
[root@localhost etcd-cert]# mkdir /opt/kubernetes/{bin,cfg,ssl} -p
[root@localhost etcd-cert]# cd ..
[root@localhost k8s]# mkdir k8s-cert
[root@localhost k8s]# cd k8s-cert/
[root@localhost k8s-cert]# vi k8s-cert.sh
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF

cat > ca-csr.json <<EOF
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing",
      	    "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

#-----------------------
# server certificate; hosts must include both master IPs, the VIP, both load balancer IPs,
# the cluster service IP 10.0.0.1 and the kubernetes service DNS names
# (JSON does not allow comments, so do not annotate the addresses inline)

cat > server-csr.json <<EOF
{
    "CN": "kubernetes",
    "hosts": [
      "10.0.0.1",
      "127.0.0.1",
      "192.168.130.10",
      "192.168.130.40",
      "192.168.130.100",
      "192.168.130.50",
      "192.168.130.60",
      "kubernetes",
      "kubernetes.default",
      "kubernetes.default.svc",
      "kubernetes.default.svc.cluster",
      "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server

#-----------------------

cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin

#-----------------------

cat > kube-proxy-csr.json <<EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

//generate the k8s certificates
[root@localhost k8s-cert]# bash k8s-cert.sh 
[root@localhost k8s-cert]# ls
admin.csr       ca-config.json  ca.pem               kube-proxy-key.pem  server-key.pem
admin-csr.json  ca.csr          k8s-cert.sh          kube-proxy.pem      server.pem
admin-key.pem   ca-csr.json     kube-proxy.csr       server.csr
admin.pem       ca-key.pem      kube-proxy-csr.json  server-csr.json
[root@localhost k8s-cert]# cp ca*pem server*pem /opt/kubernetes/ssl/
//copy in the kubernetes server package and extract it
[root@localhost k8s]# tar zxvf kubernetes-server-linux-amd64.tar.gz
[root@localhost k8s]# cd /root/k8s/kubernetes/server/bin
//copy the key command binaries
[root@localhost bin]# cp kube-apiserver kubectl kube-controller-manager kube-scheduler /opt/kubernetes/bin/
//head -c 16 /dev/urandom | od -An -t x | tr -d ' ' can be used to generate a random token
[root@localhost k8s]# vim /opt/kubernetes/cfg/token.csv
0fb61c46f8991b718eb38d27b605b008,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
format: token,user name,UID,role (a generation sketch follows below)
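A minimal sketch of generating a fresh token and writing token.csv in that format (the token value will differ on every run):
BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat > /opt/kubernetes/cfg/token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF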
//with the binaries, token, and certificates ready, start the apiserver
[root@localhost k8s]# unzip master.zip   ##copy in the master scripts package and unzip it
Archive:  master.zip
  inflating: apiserver.sh            
  inflating: controller-manager.sh   
  inflating: scheduler.sh            
[root@localhost k8s]# ls
apiserver.sh           etcd-v3.3.10-linux-amd64         kubernetes-server-linux-amd64.tar.gz
controller-manager.sh  etcd-v3.3.10-linux-amd64.tar.gz  master.zip
etcd-cert              k8s-cert                         scheduler.sh
etcd.sh                kubernetes
[root@localhost k8s]# bash apiserver.sh 192.168.130.10 https://192.168.130.10:2379,https://192.168.130.20:2379,https://192.168.130.30:2379

Created symlink from /etc/systemd/system/multi-user.target.wants/kube-apiserver.service to /usr/lib/systemd/system/kube-apiserver.service.
//check that the process started successfully
[root@localhost k8s]# ps aux | grep kube
//view the generated config file
[root@localhost k8s]# cat /opt/kubernetes/cfg/kube-apiserver 

KUBE_APISERVER_OPTS="--logtostderr=true \
--v=4 \
--etcd-servers=https://192.168.130.10:2379,https://192.168.130.20:2379,https://192.168.130.30:2379 \
--bind-address=192.168.130.10 \
--secure-port=6443 \
--advertise-address=192.168.130.10 \
--allow-privileged=true \
--service-cluster-ip-range=10.0.0.0/24 \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
--authorization-mode=RBAC,Node \
--kubelet-https=true \
--enable-bootstrap-token-auth \
--token-auth-file=/opt/kubernetes/cfg/token.csv \
--service-node-port-range=30000-50000 \
--tls-cert-file=/opt/kubernetes/ssl/server.pem  \
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \
--client-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/opt/etcd/ssl/ca.pem \
--etcd-certfile=/opt/etcd/ssl/server.pem \
--etcd-keyfile=/opt/etcd/ssl/server-key.pem"
//the HTTPS port kube-apiserver listens on
[root@localhost k8s]# netstat -ntap | grep 6443
tcp        0      0 192.168.130.10:6443     0.0.0.0:*               LISTEN      46459/kube-apiserve 
tcp        0      0 192.168.130.10:6443     192.168.130.10:36806    ESTABLISHED 46459/kube-apiserve 
tcp        0      0 192.168.130.10:36806    192.168.130.10:6443     ESTABLISHED 46459/kube-apiserve 
[root@localhost k8s]# netstat -ntap | grep 8080
tcp        0      0 127.0.0.1:8080          0.0.0.0:*               LISTEN      46459/kube-apiserve 
//start the scheduler service
[root@localhost k8s]# ./scheduler.sh 127.0.0.1
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-scheduler.service to /usr/lib/systemd/system/kube-scheduler.service.
[root@localhost k8s]# ps aux | grep ku
[root@localhost k8s]# chmod +x controller-manager.sh 
//start controller-manager
[root@localhost k8s]# ./controller-manager.sh 127.0.0.1
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-controller-manager.service to /usr/lib/systemd/system/kube-controller-manager.service.
//check the status of the master components
[root@localhost k8s]# /opt/kubernetes/bin/kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok                  
controller-manager   Healthy   ok                  
etcd-2               Healthy   {"health":"true"}   
etcd-1               Healthy   {"health":"true"}   
etcd-0               Healthy   {"health":"true"}  

Node deployment

//on the master
//copy kubelet and kube-proxy to the node machines
[root@localhost bin]# scp kubelet kube-proxy root@192.168.130.20:/opt/kubernetes/bin/
root@192.168.130.20's password: 
kubelet                                                           100%  168MB  27.9MB/s   00:06    
kube-proxy                                                        100%   48MB  31.5MB/s   00:01    
[root@localhost bin]# scp kubelet kube-proxy root@192.168.130.30:/opt/kubernetes/bin/
root@192.168.130.30's password: 
kubelet                                                           100%  168MB  56.1MB/s   00:03    
kube-proxy                                                        100%   48MB  37.3MB/s   00:01    
//on node01 (copy node.zip to the /root directory, then unzip it)
[root@localhost ~]# ls
anaconda-ks.cfg  flannel-v0.10.0-linux-amd64.tar.gz  node.zip   公共  视频  文档  音乐
flannel.sh       initial-setup-ks.cfg                README.md  模板  图片  下载  桌面
//unzipping node.zip yields kubelet.sh and proxy.sh
[root@localhost ~]# unzip node.zip 

//on the master
[root@localhost k8s]# mkdir kubeconfig
[root@localhost k8s]# cd kubeconfig/
//copy in the kubeconfig.sh file and rename it
[root@localhost kubeconfig]# mv kubeconfig.sh kubeconfig
[root@localhost kubeconfig]# vim kubeconfig 
# create the TLS bootstrapping token
#BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
BOOTSTRAP_TOKEN=0fb61c46f8991b718eb38d27b605b008

cat > token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF

#----------------------

APISERVER=$1
SSL_DIR=$2

# create the kubelet bootstrapping kubeconfig
export KUBE_APISERVER="https://$APISERVER:6443"

# set cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=$SSL_DIR/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=bootstrap.kubeconfig
# set client authentication parameters
kubectl config set-credentials kubelet-bootstrap \
  --token=${BOOTSTRAP_TOKEN} \
  --kubeconfig=bootstrap.kubeconfig

# set context parameters
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=bootstrap.kubeconfig

# set the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig

#----------------------

# create the kube-proxy kubeconfig

kubectl config set-cluster kubernetes \
  --certificate-authority=$SSL_DIR/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-credentials kube-proxy \
  --client-certificate=$SSL_DIR/kube-proxy.pem \
  --client-key=$SSL_DIR/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

----------------delete the following section from the kubeconfig script----------------------------------------------------------------------
# create the TLS bootstrapping token
#BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
BOOTSTRAP_TOKEN=0fb61c46f8991b718eb38d27b605b008

cat > token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF

//get the token from token.csv and copy it into the kubeconfig script
[root@localhost ~]# cat /opt/kubernetes/cfg/token.csv 
0fb61c46f8991b718eb38d27b605b008,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
//make sure the client authentication section of the script uses this token ID
# set client authentication parameters
kubectl config set-credentials kubelet-bootstrap \
  --token=0fb61c46f8991b718eb38d27b605b008 \
  --kubeconfig=bootstrap.kubeconfig
//set the environment variable (append export PATH=$PATH:/opt/kubernetes/bin/ to /etc/profile)
[root@localhost kubeconfig]# vi /etc/profile
[root@localhost kubeconfig]# source /etc/profile
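Equivalently, the line can be appended non-interactively instead of editing the file by hand:
[root@localhost kubeconfig]# echo 'export PATH=$PATH:/opt/kubernetes/bin/' >> /etc/profile
[root@localhost kubeconfig]# source /etc/profile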
[root@localhost kubeconfig]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok                  
controller-manager   Healthy   ok                  
etcd-1               Healthy   {"health":"true"}   
etcd-0               Healthy   {"health":"true"}   
etcd-2               Healthy   {"health":"true"}   
//generate the kubeconfig files
[root@localhost kubeconfig]# bash kubeconfig 192.168.130.10 /root/k8s/k8s-cert/
Cluster "kubernetes" set.
User "kubelet-bootstrap" set.
Context "default" created.
Switched to context "default".
Cluster "kubernetes" set.
User "kube-proxy" set.
Context "default" created.
Switched to context "default".
[root@localhost kubeconfig]# ls
bootstrap.kubeconfig  kubeconfig  kube-proxy.kubeconfig
//copy the kubeconfig files to the node machines
[root@localhost kubeconfig]# scp bootstrap.kubeconfig kube-proxy.kubeconfig root@192.168.130.20:/opt/kubernetes/cfg/
[root@localhost kubeconfig]# scp bootstrap.kubeconfig kube-proxy.kubeconfig root@192.168.130.30:/opt/kubernetes/cfg/
//create the bootstrap cluster role binding so kubelet-bootstrap can connect to the apiserver and request certificate signing (critical)
[root@localhost kubeconfig]# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
clusterrolebinding.rbac.authorization.k8s.io/kubelet-bootstrap created

//on node01
[root@localhost ~]# bash kubelet.sh 192.168.130.20
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
//check that the kubelet service started
[root@localhost ~]# ps aux | grep kube
//on the master
//check the certificate signing request from node01
[root@localhost kubeconfig]# kubectl get csr
NAME                                                   AGE     REQUESTOR           CONDITION
node-csr-NOI-9vufTLIqJgMWq4fHPNPHKbjCXlDGHptj7FqTa8A   4m27s   kubelet-bootstrap   Pending   ##waiting for the cluster to issue a certificate to this node

[root@localhost kubeconfig]# kubectl certificate approve node-csr-NOI-9vufTLIqJgMWq4fHPNPHKbjCXlDGHptj7FqTa8A
certificatesigningrequest.certificates.k8s.io/node-csr-NOI-9vufTLIqJgMWq4fHPNPHKbjCXlDGHptj7FqTa8A approved
//check the certificate status again
[root@localhost kubeconfig]# kubectl get csr
NAME                                                   AGE     REQUESTOR           CONDITION
node-csr-NOI-9vufTLIqJgMWq4fHPNPHKbjCXlDGHptj7FqTa8A   8m56s   kubelet-bootstrap   Approved,Issued   ##the node has been allowed to join the cluster
//list the cluster nodes; node01 has joined successfully
[root@localhost kubeconfig]# kubectl get node    ##if this errors, check kubelet and restart it; if kubelet itself fails, check the kubeconfig setup on the master, then restart kubelet
NAME              STATUS   ROLES    AGE    VERSION
192.168.130.20   Ready    <none>   118s   v1.12.3
//on node01, start the kube-proxy service
[root@localhost ~]# bash proxy.sh 192.168.130.20
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-proxy.service to /usr/lib/systemd/system/kube-proxy.service.
[root@localhost ~]# systemctl status kube-proxy.service 

node02 deployment

//on node01
//copy the existing /opt/kubernetes directory to the other node and modify it there
[root@localhost ~]# scp -r /opt/kubernetes/ root@192.168.130.30:/opt/
//copy the kubelet and kube-proxy service files to node02
[root@localhost ~]# scp /usr/lib/systemd/system/{kubelet,kube-proxy}.service root@192.168.130.30:/usr/lib/systemd/system/
//on node02, make the modifications
//first delete the copied certificates; node02 will request its own certificate later
[root@localhost ~]# cd /opt/kubernetes/ssl/
[root@localhost ssl]# rm -rf *
//edit the kubelet, kubelet.config, and kube-proxy config files (change the IP address in all three; a sed sketch follows below)
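A sketch of that replacement with sed, assuming the copied files only reference node01's address 192.168.130.20 and node02's own address is 192.168.130.30:
[root@localhost ssl]# cd /opt/kubernetes/cfg
[root@localhost cfg]# sed -i 's/192.168.130.20/192.168.130.30/g' kubelet kubelet.config kube-proxy
[root@localhost cfg]# grep 192.168.130 kubelet kubelet.config kube-proxy    ##confirm only .30 addresses remain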
//start the services
[root@localhost cfg]# systemctl start kubelet.service 
[root@localhost cfg]# systemctl enable kubelet.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
[root@localhost cfg]# systemctl start kube-proxy.service 
[root@localhost cfg]# systemctl enable kube-proxy.service 
//on the master, check the new request

[root@localhost k8s]# kubectl get csr
NAME                                                   AGE   REQUESTOR           CONDITION
node-csr-OaH9HpIKh6AKlfdjEKm4C6aJ0UT_1YxNaa70yEAxnsU   15s   kubelet-bootstrap   Pending
//approve it so the node is allowed to join the cluster
[root@localhost k8s]# kubectl certificate approve node-csr-OaH9HpIKh6AKlfdjEKm4C6aJ0UT_1YxNaa70yEAxnsU
certificatesigningrequest.certificates.k8s.io/node-csr-OaH9HpIKh6AKlfdjEKm4C6aJ0UT_1YxNaa70yEAxnsU approved
//list the nodes in the cluster
[root@localhost k8s]# kubectl get node
NAME              STATUS   ROLES    AGE   VERSION
192.168.130.20   Ready    <none>   21h   v1.12.3
192.168.130.30   Ready    <none>   37s   v1.12.3

The single-master deployment is now complete.

Next comes the multi-master deployment, i.e. two masters.
master02 deployment

//first disable the firewall and SELinux
//on master01
//copy the kubernetes directory to master02
[root@localhost k8s]# scp -r /opt/kubernetes/ root@192.168.130.40:/opt
//copy the three master component service files: kube-apiserver.service, kube-controller-manager.service, kube-scheduler.service

[root@localhost k8s]# scp /usr/lib/systemd/system/{kube-apiserver,kube-controller-manager,kube-scheduler}.service root@192.168.130.40:/usr/lib/systemd/system/
//on master02
//modify the IP addresses in the kube-apiserver config file
[root@localhost ~]# cd /opt/kubernetes/cfg/
[root@localhost cfg]# vim kube-apiserver   ##change --bind-address and --advertise-address to master02's IP, 192.168.130.40
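A sketch of the same edit with sed, assuming only --bind-address and --advertise-address need changing (the --etcd-servers list must keep pointing at the existing etcd endpoints):
[root@localhost cfg]# sed -i 's/--bind-address=192.168.130.10/--bind-address=192.168.130.40/; s/--advertise-address=192.168.130.10/--advertise-address=192.168.130.40/' kube-apiserver
[root@localhost cfg]# grep address kube-apiserver    ##both addresses should now read 192.168.130.40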
//note: master02 must have the etcd certificates
//copy the existing etcd certificates from master01 for master02 to use
[root@localhost k8s]# scp -r /opt/etcd/ root@192.168.130.40:/opt/
//start the three component services on master02
[root@localhost cfg]# systemctl start kube-apiserver.service 
[root@localhost cfg]# systemctl start kube-controller-manager.service 
[root@localhost cfg]# systemctl start kube-scheduler.service 
//add the environment variable
[root@localhost cfg]# vim /etc/profile
#append at the end
export PATH=$PATH:/opt/kubernetes/bin/
[root@localhost cfg]# source /etc/profile
[root@localhost cfg]# kubectl get node
NAME              STATUS   ROLES    AGE     VERSION
192.168.130.20   Ready    <none>   2d12h   v1.12.3
192.168.130.30   Ready    <none>   38h     v1.12.3

The multi-master deployment is now complete.
Next, deploy load balancing for k8s.
//the steps are the same on lb01 and lb02

Install nginx and keepalived; the steps are identical on both machines.
[root@localhost ~]# vim /etc/yum.repos.d/nginx.repo
[nginx]
name=nginx repo
baseurl=http://nginx.org/packages/centos/7/$basearch/
gpgcheck=0
[root@localhost ~]# yum install nginx -y
//add layer-4 (stream) forwarding
[root@localhost ~]# vim /etc/nginx/nginx.conf 

events {
    worker_connections  1024;
}
stream {

   log_format  main  '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
    access_log  /var/log/nginx/k8s-access.log  main;

    upstream k8s-apiserver {
        server 192.168.130.10:6443;
        server 192.168.130.40:6443;
    }
    server {
                listen 6443;
                proxy_pass k8s-apiserver;
    }
    }
http {
......
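Before starting nginx, the stream configuration can be syntax-checked; it should report something like:
[root@localhost ~]# nginx -t
nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
nginx: configuration file /etc/nginx/nginx.conf test is successful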
[root@localhost ~]# systemctl start nginx

//deploy the keepalived service
[root@localhost ~]# yum install keepalived -y
//modify the configuration file
[root@localhost ~]# cp keepalived.conf /etc/keepalived/keepalived.conf 
cp: overwrite '/etc/keepalived/keepalived.conf'? yes
//note: lb01 is the MASTER; its configuration is as follows:

! Configuration File for keepalived 
 
global_defs { 
   # notification email recipients 
   notification_email { 
     acassen@firewall.loc 
     failover@firewall.loc 
     sysadmin@firewall.loc 
   } 
   # sender address for notifications 
   notification_email_from Alexandre.Cassen@firewall.loc  
   smtp_server 127.0.0.1 
   smtp_connect_timeout 30 
   router_id NGINX_MASTER 
} 

vrrp_script check_nginx {
    script "/etc/nginx/check_nginx.sh"
}

vrrp_instance VI_1 { 
    state MASTER 
    interface ens33
    virtual_router_id 51 # VRRP router ID; must be unique per instance 
    priority 100    # priority; set 90 on the backup server 
    advert_int 1    # VRRP advertisement interval in seconds, default 1 
    authentication { 
        auth_type PASS      
        auth_pass 1111 
    }  
    virtual_ipaddress { 
        192.168.130.100/24 
    } 
    track_script {
        check_nginx
    } 
}
Note: in the lb02 configuration, change the following:
 state BACKUP 
   priority 90   
   
[root@localhost ~]# vim /etc/nginx/check_nginx.sh  ##nginx health-check script
#!/bin/bash
count=$(ps -ef |grep nginx |egrep -cv "grep|$$")

if [ "$count" -eq 0 ];then
    systemctl stop keepalived
fi
[root@localhost ~]# chmod +x /etc/nginx/check_nginx.sh
[root@localhost ~]# systemctl start keepalived
[root@localhost ~]# ip add
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:06:c3:0d brd ff:ff:ff:ff:ff:ff
    inet 192.168.130.50/24 brd 192.168.130.255 scope global noprefixroute ens33
       valid_lft forever preferred_lft forever
    inet 192.168.130.100/24 scope global secondary ens33   ##vip
       valid_lft forever preferred_lft forever
    inet6 fe80::695c:1dbe:f9b2:ca8e/64 scope link noprefixroute 
       valid_lft forever preferred_lft forever

//verify VIP failover (run pkill nginx on lb01, then check with ip a on lb02)
//recovery (on lb01, start the nginx service first, then start keepalived)
//nginx document root: /usr/share/nginx/html

//on the node machines, point the kubeconfig files at the unified VIP (bootstrap.kubeconfig, kubelet.kubeconfig, kube-proxy.kubeconfig)
[root@localhost cfg]# vim /opt/kubernetes/cfg/bootstrap.kubeconfig
[root@localhost cfg]# vim /opt/kubernetes/cfg/kubelet.kubeconfig
[root@localhost cfg]# vim /opt/kubernetes/cfg/kube-proxy.kubeconfig
//change the server address in all of them to the VIP
server: https://192.168.130.100:6443
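The same change can be sketched as a sed one-liner across all three files, assuming the old server lines pointed at master01:
[root@localhost cfg]# sed -i 's#server: https://192.168.130.10:6443#server: https://192.168.130.100:6443#' bootstrap.kubeconfig kubelet.kubeconfig kube-proxy.kubeconfig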
[root@localhost cfg]# systemctl restart kubelet.service 
[root@localhost cfg]# systemctl restart kube-proxy.service 
//after the replacement, self-check
[root@localhost cfg]# grep 100 *
bootstrap.kubeconfig:    server: https://192.168.130.100:6443
kubelet.kubeconfig:    server: https://192.168.130.100:6443
kube-proxy.kubeconfig:    server: https://192.168.130.100:6443
//on lb01, check nginx's k8s access log
[root@localhost ~]# tail /var/log/nginx/k8s-access.log 
192.168.130.20 192.168.130.10:6443 - [05/Feb/2020:12:43:50 +0800] 200 1121
192.168.130.20 192.168.130.40:6443 - [05/Feb/2020:12:43:50 +0800] 200 1120
192.168.130.30 192.168.130.40:6443 - [05/Feb/2020:12:45:38 +0800] 200 1121
192.168.130.30 192.168.130.10:6443 - [05/Feb/2020:12:45:38 +0800] 200 1121
//on master01
//test creating a pod
[root@localhost ~]# kubectl run nginx --image=nginx
kubectl run --generator=deployment/apps.v1beta1 is DEPRECATED and will be removed in a future version. Use kubectl create instead.
deployment.apps/nginx created
//check the status
[root@localhost ~]# kubectl get pods
NAME                    READY   STATUS              RESTARTS   AGE
nginx-dbddb74b8-nf9sk   0/1     ContainerCreating   0          33s   //being created
[root@localhost ~]# kubectl get pods
NAME                    READY   STATUS    RESTARTS   AGE
nginx-dbddb74b8-nf9sk   1/1     Running   0          80s  //created and running

//note: viewing pod logs fails at first due to missing permissions
[root@localhost ~]# kubectl logs nginx-dbddb74b8-nf9sk
Error from server (Forbidden): Forbidden (user=system:anonymous, verb=get, resource=nodes, subresource=proxy) ( pods/log nginx-dbddb74b8-nf9sk)

[root@localhost ~]# kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=system:anonymous
clusterrolebinding.rbac.authorization.k8s.io/cluster-system-anonymous created
//check the pod network
[root@localhost ~]# kubectl get pods -o wide
NAME                    READY   STATUS    RESTARTS   AGE   IP            NODE              NOMINATED NODE
nginx-dbddb74b8-nf9sk   1/1     Running   0          11m   172.17.31.3   192.168.130.20   <none>

//from the node on the corresponding subnet, the pod IP can be accessed directly
[root@localhost cfg]# curl 172.17.31.3
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
......
//the access generates a log entry
//back on master01
[root@localhost ~]# kubectl logs nginx-dbddb74b8-nf9sk
172.17.31.1 - - [05/Feb/2020:05:08:36 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.29.0" "-"