k8s Single-Master Deployment (Binary Installation)

Hostname     Memory & CPU                IP address
master01     2 GB, 2 cores / 2 threads   192.168.218.141
node01       2 GB, 2 cores / 2 threads   192.168.218.151
node02       2 GB, 2 cores / 2 threads   192.168.218.152

ISO: CentOS-7-x86_64-DVD-1908.iso

Package and script downloads

Link: https://pan.baidu.com/s/1aIB_TP_n89gMZaM82p-rCg
Extraction code: pxf3

Initialization

# Run the following initialization steps on all hosts


# Add hostname resolution
vim /etc/hosts
192.168.218.141	master01
192.168.218.151	node01
192.168.218.152	node02

# Set the time zone
vim /etc/profile
TZ='Asia/Shanghai'
export TZ
source /etc/profile

# Sync the time
ntpdate ntp.aliyun.com

# Stop and disable the firewall; put SELinux in permissive mode
systemctl stop firewalld
systemctl disable firewalld
setenforce 0
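setenforce 0 and ntpdate only affect the running system; a minimal sketch for making both persistent across reboots (the 30-minute cron interval is just an example):

# Disable SELinux permanently (effective after the next reboot)
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
# Re-sync the clock every 30 minutes via root's crontab
(crontab -l 2>/dev/null; echo '*/30 * * * * /usr/sbin/ntpdate ntp.aliyun.com >/dev/null 2>&1') | crontab -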


Upload the required files to the /k8s_data directory on master01

[root@master01 k8s_data]# ll
total 433296
-rw-r--r--. 1 root root      1088 Mar 16 08:14 etcd-cert.sh
-rw-r--r--. 1 root root      1764 Mar 16 08:14 etcd.sh
-rw-r--r--. 1 root root  11353259 Mar 16 08:14 etcd-v3.3.10-linux-amd64.tar.gz
-rw-r--r--. 1 root root       836 Mar 16 08:14 flannel.sh
-rw-r--r--. 1 root root   9706487 Mar 16 08:14 flannel-v0.10.0-linux-amd64.tar.gz
-rw-r--r--. 1 root root      2266 Mar 16 08:14 k8s-cert.sh
-rw-r--r--. 1 root root      1611 Mar 16 08:14 kubeconfig.sh
-rw-r--r--. 1 root root 422603860 Mar 16 08:14 kubernetes-server-linux-amd64.tar.gz
-rw-r--r--. 1 root root      1948 Mar 16 08:14 master.zip
-rw-r--r--. 1 root root      1240 Mar 16 08:14 node.zip

Create the TLS certificates for etcd

# Create a directory for the k8s-related files
[root@master01 ~]# mkdir k8s
[root@master01 ~]# cd k8s

[root@master01 k8s]# cp /k8s_data/etcd-cert.sh /k8s_data/etcd.sh .
[root@master01 k8s]# mkdir etcd-cert
[root@master01 k8s]# mv etcd-cert.sh etcd-cert

# Use a script to download the certificate tools
# cfssl          - generates certificates
# cfssljson      - generates certificate files from cfssl's JSON output
# cfssl-certinfo - displays certificate information
[root@master01 k8s]# vim cfssl.sh
[root@master01 k8s]# cat cfssl.sh
curl -L https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -o /usr/local/bin/cfssl
curl -L https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -o /usr/local/bin/cfssljson
curl -L https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -o /usr/local/bin/cfssl-certinfo
chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson /usr/local/bin/cfssl-certinfo
[root@master01 k8s]# bash cfssl.sh
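A quick sanity check that the tools landed on the PATH (cfssl prints its version information):

[root@master01 k8s]# cfssl version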


# Define the CA configuration
cat > ca-config.json <<-EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "www": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"    
        ] 
      }
    }        
  }
}
EOF



# Define the CA certificate signing request (CSR)
cat > ca-csr.json <<-EOF
{  
    "CN": "etcd CA",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing"
        }
    ]
}
EOF


# Generate the CA certificate: produces ca-key.pem and ca.pem
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

# Specify the three etcd node IPs so peer/client communication can be verified against the certificate
cat > server-csr.json <<EOF
{
    "CN": "etcd",
    "hosts": [
    "192.168.218.141",
    "192.168.218.151",
    "192.168.218.152"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing"
        }
    ]
}
EOF

[root@master01 k8s]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server
# Move the certificates into the etcd cert directory
[root@master01 k8s]# mv *.json *.pem *.csr etcd-cert/
[root@master01 k8s]# mv /k8s_data/*.tar.gz .
[root@master01 k8s]# tree .
.
├── cfssl.sh
├── etcd-cert
│   ├── ca-config.json
│   ├── ca.csr
│   ├── ca-csr.json
│   ├── ca-key.pem
│   ├── ca.pem
│   ├── etcd-cert.sh
│   ├── server.csr
│   ├── server-csr.json
│   ├── server-key.pem
│   └── server.pem
├── etcd.sh
├── etcd-v3.3.10-linux-amd64.tar.gz
├── flannel-v0.10.0-linux-amd64.tar.gz
└── kubernetes-server-linux-amd64.tar.gz

1 directory, 15 files
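To confirm the server certificate really carries the three etcd node IPs, it can be inspected with cfssl-certinfo (the inspection tool downloaded earlier):

# Print the certificate details; the SAN/hosts section should list 192.168.218.141/151/152
[root@master01 k8s]# cfssl-certinfo -cert etcd-cert/server.pem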

Deploy etcd

[root@master01 k8s]# tar xf etcd-v3.3.10-linux-amd64.tar.gz
[root@master01 k8s]# ls etcd-v3.3.10-linux-amd64
Documentation  etcd  etcdctl  README-etcdctl.md  README.md  READMEv2-etcdctl.md
# Create directories for configuration files, binaries, and certificates
[root@master01 k8s]# mkdir /opt/etcd/{cfg,bin,ssl} -p    
[root@master01 k8s]# mv etcd-v3.3.10-linux-amd64/etcd etcd-v3.3.10-linux-amd64/etcdctl /opt/etcd/bin/
# Copy the certificates
[root@master01 k8s]# cp etcd-cert/*.pem /opt/etcd/ssl/

############# etcd.sh ###############

#!/bin/bash
# example: ./etcd.sh etcd01 192.168.1.10 etcd02=https://192.168.1.11:2380,etcd03=https://192.168.1.12:2380

ETCD_NAME=$1
ETCD_IP=$2
ETCD_CLUSTER=$3
# Directory that holds the etcd files
WORK_DIR=/opt/etcd
# Generate the etcd configuration file
cat <<EOF >$WORK_DIR/cfg/etcd
#[Member]
ETCD_NAME="${ETCD_NAME}"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://${ETCD_IP}:2380"
ETCD_LISTEN_CLIENT_URLS="https://${ETCD_IP}:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://${ETCD_IP}:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://${ETCD_IP}:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://${ETCD_IP}:2380,${ETCD_CLUSTER}"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
# Generate the systemd unit file
cat <<EOF >/usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=${WORK_DIR}/cfg/etcd
ExecStart=${WORK_DIR}/bin/etcd \
--name=\${ETCD_NAME} \
--data-dir=\${ETCD_DATA_DIR} \
--listen-peer-urls=\${ETCD_LISTEN_PEER_URLS} \
--listen-client-urls=\${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
--advertise-client-urls=\${ETCD_ADVERTISE_CLIENT_URLS} \
--initial-advertise-peer-urls=\${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
--initial-cluster=\${ETCD_INITIAL_CLUSTER} \
--initial-cluster-token=\${ETCD_INITIAL_CLUSTER_TOKEN} \
--initial-cluster-state=new \
--cert-file=${WORK_DIR}/ssl/server.pem \
--key-file=${WORK_DIR}/ssl/server-key.pem \
--peer-cert-file=${WORK_DIR}/ssl/server.pem \
--peer-key-file=${WORK_DIR}/ssl/server-key.pem \
--trusted-ca-file=${WORK_DIR}/ssl/ca.pem \
--peer-trusted-ca-file=${WORK_DIR}/ssl/ca.pem
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
# Start etcd and enable it at boot
systemctl daemon-reload
systemctl enable etcd
systemctl restart etcd



# The command blocks here, waiting for the other etcd nodes to join
[root@master01 k8s]# bash etcd.sh etcd01 192.168.218.141 etcd02=https://192.168.218.151:2380,etcd03=https://192.168.218.152:2380

# In a new terminal, copy the etcd files to both node hosts
[root@master01 ~]# scp -r /opt/etcd/ root@node01:/opt/
[root@master01 ~]# scp -r /opt/etcd/ root@node02:/opt/
[root@master01 ~]# scp /usr/lib/systemd/system/etcd.service root@node01:/usr/lib/systemd/system/
[root@master01 ~]# scp /usr/lib/systemd/system/etcd.service root@node02:/usr/lib/systemd/system/

Modify the etcd configuration on node01

# Fields to change:
# ETCD_NAME, ETCD_LISTEN_PEER_URLS, ETCD_LISTEN_CLIENT_URLS,
# ETCD_INITIAL_ADVERTISE_PEER_URLS, ETCD_ADVERTISE_CLIENT_URLS

[root@node01 ~]# vim /opt/etcd/cfg/etcd
#[Member]
ETCD_NAME="etcd02"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.218.151:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.218.151:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.218.151:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.218.151:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://192.168.218.141:2380,etcd02=https://192.168.218.151:2380,etcd03=https://192.168.218.152:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"


[root@node01 ~]# systemctl start etcd
[root@node01 ~]# systemctl enable etcd

Modify the etcd configuration on node02

# Same as node01, but with node02's name and IP

[root@node02 ~]#  vim /opt/etcd/cfg/etcd
#[Member]
ETCD_NAME="etcd03"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.218.152:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.218.152:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.218.152:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.218.152:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://192.168.218.141:2380,etcd02=https://192.168.218.151:2380,etcd03=https://192.168.218.152:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"


[root@node02 ~]# systemctl start etcd
[root@node02 ~]# systemctl enable etcd

Run an etcd health check from master01

# Run this from the /root/k8s/etcd-cert directory
/opt/etcd/bin/etcdctl \
--ca-file=ca.pem \
--cert-file=server.pem \
--key-file=server-key.pem \
--endpoints="https://192.168.218.141:2379,https://192.168.218.151:2379,https://192.168.218.152:2379" \
cluster-health
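Besides cluster-health, the member list can be checked with the same certificates; a sketch (also run from /root/k8s/etcd-cert):

/opt/etcd/bin/etcdctl \
--ca-file=ca.pem \
--cert-file=server.pem \
--key-file=server-key.pem \
--endpoints="https://192.168.218.141:2379,https://192.168.218.151:2379,https://192.168.218.152:2379" \
member list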

Install Docker on node01 and node02

See my blog post:

https://blog.csdn.net/weixin_43515220/article/details/105371439

Deploy the flannel network

# On master01
# Write the VXLAN network configuration into etcd
[root@master01 k8s]# cd etcd-cert/
/opt/etcd/bin/etcdctl --ca-file=ca.pem \
--cert-file=server.pem \
--key-file=server-key.pem \
--endpoints="https://192.168.218.141:2379,\
https://192.168.218.151:2379,\
https://192.168.218.152:2379" \
set /coreos.com/network/config '{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}'

{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}

# View the network configuration
/opt/etcd/bin/etcdctl \
--ca-file=ca.pem \
--cert-file=server.pem \
--key-file=server-key.pem \
--endpoints="https://192.168.218.141:2379,\
https://192.168.218.151:2379,\
https://192.168.218.152:2379" \
get /coreos.com/network/config

{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}

Copy the flannel package to the node hosts

[root@master01 etcd-cert]# cd ..
# flannel only needs to be deployed on the node hosts
[root@master01 k8s]# scp flannel-v0.10.0-linux-amd64.tar.gz root@192.168.218.151:/root/
[root@master01 k8s]# scp flannel-v0.10.0-linux-amd64.tar.gz root@192.168.218.152:/root/

# Run on node01 and node02
tar xf flannel-v0.10.0-linux-amd64.tar.gz
mkdir /opt/kubernetes/{cfg,bin,ssl} -p
mv mk-docker-opts.sh flanneld /opt/kubernetes/bin/


vim flannel.sh
# Write in the following content
#!/bin/bash

ETCD_ENDPOINTS=${1:-"http://127.0.0.1:2379"}
# Generate the flannel options file
cat <<EOF >/opt/kubernetes/cfg/flanneld

FLANNEL_OPTIONS="--etcd-endpoints=${ETCD_ENDPOINTS} \
-etcd-cafile=/opt/etcd/ssl/ca.pem \
-etcd-certfile=/opt/etcd/ssl/server.pem \
-etcd-keyfile=/opt/etcd/ssl/server-key.pem"

EOF
# Generate the flanneld systemd unit
cat <<EOF >/usr/lib/systemd/system/flanneld.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network-online.target network.target
Before=docker.service

[Service]
Type=notify
EnvironmentFile=/opt/kubernetes/cfg/flanneld
ExecStart=/opt/kubernetes/bin/flanneld --ip-masq \$FLANNEL_OPTIONS
ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure

[Install]
WantedBy=multi-user.target

EOF
# Start flanneld and enable it at boot
systemctl daemon-reload
systemctl enable flanneld
systemctl restart flanneld

# Enable the flannel network
# Run on both node hosts
bash flannel.sh https://192.168.218.141:2379,https://192.168.218.151:2379,https://192.168.218.152:2379
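If flanneld started cleanly, each node should now have a flannel.1 VXLAN interface and a subnet file that Docker will consume in the next step; a quick check on either node:

# Subnet and MTU options generated for Docker
cat /run/flannel/subnet.env
# VXLAN interface created by flanneld
ip addr show flannel.1
systemctl status flanneld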

Configure Docker to use the flannel network

# Docker has already been installed on both node hosts
vim /usr/lib/systemd/system/docker.service
...
# Add this line
EnvironmentFile=/run/flannel/subnet.env
# Add $DOCKER_NETWORK_OPTIONS to the ExecStart line
ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS
...


# Restart Docker
systemctl daemon-reload
systemctl restart docker
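After the restart, docker0 should have moved into the flannel subnet assigned to this node (dockerd is now started with the options from $DOCKER_NETWORK_OPTIONS, which typically include --bip); to verify:

ip addr show docker0
ps -ef | grep dockerd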

Test the flannel network

# Run a container on each node host
# If the two containers can reach each other,
# the flannel network is working
[root@node01 ~]# docker run -it centos /bin/bash
[root@node02 ~]# docker run -it centos /bin/bash


# Have the two containers ping each other's IP addresses

Deploy the master components

# Run on master01
[root@master01 k8s]# mv /k8s_data/master.zip .
# master.zip contains three scripts:
# apiserver.sh   controller-manager.sh   scheduler.sh
[root@master01 k8s]# unzip master.zip
[root@master01 k8s]# mkdir /opt/kubernetes/{cfg,bin,ssl} -p
[root@master01 k8s]# mkdir k8s-cert
[root@master01 k8s]# cd k8s-cert/
[root@master01 k8s-cert]# cp /k8s_data/k8s-cert.sh .

# Contents of k8s-cert.sh
# This script generates the Kubernetes certificates
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF

cat > ca-csr.json <<EOF
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing",
      	    "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

#-----------------------

# Note: the hosts list must be valid JSON, so keep it free of inline comments.
# 192.168.218.141 - master01
# 192.168.218.142 - master02 (reserved for a future multi-master setup)
# 192.168.218.140 - VIP for the multi-master setup
# 192.168.218.131 - load balancer for the multi-master setup (lb master)
# 192.168.218.132 - load balancer for the multi-master setup (lb backup)
cat > server-csr.json <<EOF
{
    "CN": "kubernetes",
    "hosts": [
      "10.0.0.1",
      "127.0.0.1",
      "192.168.218.141",
      "192.168.218.142",
      "192.168.218.140",
      "192.168.218.131",
      "192.168.218.132",
      "kubernetes",
      "kubernetes.default",
      "kubernetes.default.svc",
      "kubernetes.default.svc.cluster",
      "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server

#-----------------------

cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin

#-----------------------

cat > kube-proxy-csr.json <<EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

Generate the master certificates

[root@master01 k8s-cert]# bash k8s-cert.sh
# Copy the certificates to the designated cert directory
[root@master01 k8s-cert]# cp ca*pem server*pem /opt/kubernetes/ssl/
[root@master01 k8s-cert]# cd ..
[root@master01 k8s]# tar xf kubernetes-server-linux-amd64.tar.gz
[root@master01 k8s]# cd /root/k8s/kubernetes/server/bin/
# Copy the key binaries
[root@master01 bin]# cp kube-apiserver kubectl kube-controller-manager kube-scheduler /opt/kubernetes/bin/
[root@master01 bin]# cd /root/k8s/
# Generate a random token
[root@master01 k8s]# head -c 16 /dev/urandom | od -An -t x | tr -d ' '
a72c0edf9beb9e6ff959416357ff3098
# Use the generated value as the bootstrap authentication token
[root@master01 k8s]# vim /opt/kubernetes/cfg/token.csv
a72c0edf9beb9e6ff959416357ff3098,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
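Each field in token.csv is token,user,uid,"group". Generating the token and writing the file can also be done in one step; a small sketch:

BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat > /opt/kubernetes/cfg/token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF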

Configure and start kube-apiserver

# Contents of apiserver.sh
#!/bin/bash

MASTER_ADDRESS=$1
ETCD_SERVERS=$2
# Generate the apiserver options file
cat <<EOF >/opt/kubernetes/cfg/kube-apiserver

KUBE_APISERVER_OPTS="--logtostderr=true \\
--v=4 \\
--etcd-servers=${ETCD_SERVERS} \\
--bind-address=${MASTER_ADDRESS} \\
--secure-port=6443 \\
--advertise-address=${MASTER_ADDRESS} \\
--allow-privileged=true \\
--service-cluster-ip-range=10.0.0.0/24 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--kubelet-https=true \\
--enable-bootstrap-token-auth \\
--token-auth-file=/opt/kubernetes/cfg/token.csv \\
--service-node-port-range=30000-50000 \\
--tls-cert-file=/opt/kubernetes/ssl/server.pem  \\
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \\
--client-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--etcd-cafile=/opt/etcd/ssl/ca.pem \\
--etcd-certfile=/opt/etcd/ssl/server.pem \\
--etcd-keyfile=/opt/etcd/ssl/server-key.pem"

EOF
# Generate the kube-apiserver systemd unit
cat <<EOF >/usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-apiserver
ExecStart=/opt/kubernetes/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF
# Start kube-apiserver and enable it at boot
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl restart kube-apiserver


# Start the apiserver
[root@master01 k8s]# bash apiserver.sh 192.168.218.141 https://192.168.218.141:2379,https://192.168.218.151:2379,https://192.168.218.152:2379
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-apiserver.service to /usr/lib/systemd/system/kube-apiserver.service.
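If the apiserver came up, the secure port 6443 and the local insecure port 8080 should be listening; a quick check (netstat is provided by the net-tools package):

[root@master01 k8s]# systemctl status kube-apiserver
[root@master01 k8s]# netstat -ntlp | grep -E '6443|8080'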

Configure and start kube-scheduler

# Contents of scheduler.sh
#!/bin/bash

MASTER_ADDRESS=$1
# Generate the scheduler options file
cat <<EOF >/opt/kubernetes/cfg/kube-scheduler

KUBE_SCHEDULER_OPTS="--logtostderr=true \\
--v=4 \\
--master=${MASTER_ADDRESS}:8080 \\
--leader-elect"

EOF
# Generate the kube-scheduler systemd unit
cat <<EOF >/usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-scheduler
ExecStart=/opt/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF
# Start kube-scheduler and enable it at boot
systemctl daemon-reload
systemctl enable kube-scheduler
systemctl restart kube-scheduler



# Start the scheduler service
[root@master01 k8s]# ./scheduler.sh 127.0.0.1
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-scheduler.service to /usr/lib/systemd/system/kube-scheduler.service.

Configure and start kube-controller-manager

# Contents of controller-manager.sh
#!/bin/bash

MASTER_ADDRESS=$1

# Generate the controller-manager options file
cat <<EOF >/opt/kubernetes/cfg/kube-controller-manager

KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \\
--v=4 \\
--master=${MASTER_ADDRESS}:8080 \\
--leader-elect=true \\
--address=127.0.0.1 \\
--service-cluster-ip-range=10.0.0.0/24 \\
--cluster-name=kubernetes \\
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem  \\
--root-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--experimental-cluster-signing-duration=87600h0m0s"

EOF
# Generate the kube-controller-manager systemd unit
cat <<EOF >/usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-controller-manager
ExecStart=/opt/kubernetes/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF
# Start kube-controller-manager and enable it at boot
systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl restart kube-controller-manager



[root@master01 k8s]# chmod +x controller-manager.sh
[root@master01 k8s]# ./controller-manager.sh 127.0.0.1
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-controller-manager.service to /usr/lib/systemd/system/kube-controller-manager.service.
# Check the component status
[root@master01 k8s]#  /opt/kubernetes/bin/kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-0               Healthy   {"health":"true"}
etcd-2               Healthy   {"health":"true"}
etcd-1               Healthy   {"health":"true"}

Deploy the node components

# From the master, copy kubelet and kube-proxy to the node hosts
[root@master01 bin]# pwd
/root/k8s/kubernetes/server/bin
[root@master01 bin]# scp kubelet kube-proxy root@node01:/opt/kubernetes/bin/
[root@master01 bin]# scp kubelet kube-proxy root@node02:/opt/kubernetes/bin/
[root@master01 bin]# scp /k8s_data/node.zip root@node01:/root/
[root@master01 bin]# scp /k8s_data/node.zip root@node02:/root/
# node.zip contains two scripts:
# kubelet.sh    proxy.sh
[root@node01 ~]# unzip node.zip
# On the master
[root@master01 bin]# mkdir kubeconfig
[root@master01 bin]# cd kubeconfig/
[root@master01 kubeconfig]# cp /k8s_data/kubeconfig.sh .
[root@master01 kubeconfig]# mv kubeconfig.sh kubeconfig
[root@master01 kubeconfig]# vim kubeconfig
# Delete the following block from the script
# Create the TLS Bootstrapping Token
#BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
BOOTSTRAP_TOKEN=0fb61c46f8991b718eb38d27b605b008
 
cat > token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF




# Add the kubernetes binaries to PATH via /etc/profile
echo "export PATH=$PATH:/opt/kubernetes/bin" >> /etc/profile
[root@master01 kubeconfig]# export PATH=$PATH:/opt/kubernetes/bin/
# Retrieve the token
[root@master01 kubeconfig]# cat /opt/kubernetes/cfg/token.csv
a72c0edf9beb9e6ff959416357ff3098,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
# Edit kubeconfig so the --token parameter matches
[root@master01 kubeconfig]# vim kubeconfig
--token=a72c0edf9beb9e6ff959416357ff3098

[root@master01 kubeconfig]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok
controller-manager   Healthy   ok
etcd-0               Healthy   {"health":"true"}
etcd-2               Healthy   {"health":"true"}
etcd-1               Healthy   {"health":"true"}

[root@master01 kubeconfig]# bash kubeconfig 192.168.218.141 /root/k8s/k8s-cert/

# Copy the kubeconfig files to the node hosts
[root@master01 kubeconfig]# scp bootstrap.kubeconfig kube-proxy.kubeconfig root@node01:/opt/kubernetes/cfg/
[root@master01 kubeconfig]# scp bootstrap.kubeconfig kube-proxy.kubeconfig root@node02:/opt/kubernetes/cfg/

# Create the bootstrap role binding so kubelets can connect to the apiserver and request certificate signing (critical)
[root@master01 kubeconfig]# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
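To confirm the binding was created:

[root@master01 kubeconfig]# kubectl get clusterrolebinding kubelet-bootstrap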

Start kubelet on node01

# Contents of kubelet.sh
#!/bin/bash

NODE_ADDRESS=$1
DNS_SERVER_IP=${2:-"10.0.0.2"}
# Generate the kubelet options file
cat <<EOF >/opt/kubernetes/cfg/kubelet

KUBELET_OPTS="--logtostderr=true \\
--v=4 \\
--hostname-override=${NODE_ADDRESS} \\
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\
--config=/opt/kubernetes/cfg/kubelet.config \\
--cert-dir=/opt/kubernetes/ssl \\
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"

EOF
# Generate the kubelet configuration file
cat <<EOF >/opt/kubernetes/cfg/kubelet.config

kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: ${NODE_ADDRESS}
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
- ${DNS_SERVER_IP} 
clusterDomain: cluster.local.
failSwapOn: false
authentication:
  anonymous:
    enabled: true
EOF
# Generate the kubelet systemd unit
cat <<EOF >/usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kubelet
ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
KillMode=process

[Install]
WantedBy=multi-user.target
EOF
# Start kubelet and enable it at boot
systemctl daemon-reload
systemctl enable kubelet
systemctl restart kubelet


[root@node01 ~]# bash kubelet.sh 192.168.218.151
[root@master01 kubeconfig]# kubectl get csr
NAME                                                   AGE   REQUESTOR           CONDITION
node-csr-p5MLvZ8ujKdPFuQfMHgnB6mm5qGHw-7ii4QocOMmH6Q   8s    kubelet-bootstrap   Pending
# The CSR NAME must match the one listed above
[root@master01 kubeconfig]# kubectl certificate approve  node-csr-p5MLvZ8ujKdPFuQfMHgnB6mm5qGHw-7ii4QocOMmH6Q
certificatesigningrequest.certificates.k8s.io/node-csr-p5MLvZ8ujKdPFuQfMHgnB6mm5qGHw-7ii4QocOMmH6Q approved
# The node has been approved to join the cluster
[root@master01 kubeconfig]# kubectl get csr
NAME                                                   AGE     REQUESTOR           CONDITION
node-csr-p5MLvZ8ujKdPFuQfMHgnB6mm5qGHw-7ii4QocOMmH6Q   2m14s   kubelet-bootstrap   Approved,Issued

[root@master01 kubeconfig]# kubectl get node
NAME              STATUS   ROLES    AGE   VERSION
192.168.218.151   Ready    <none>   56s   v1.12.3

# Contents of proxy.sh
#!/bin/bash

NODE_ADDRESS=$1
# Generate the kube-proxy options file
cat <<EOF >/opt/kubernetes/cfg/kube-proxy

KUBE_PROXY_OPTS="--logtostderr=true \\
--v=4 \\
--hostname-override=${NODE_ADDRESS} \\
--cluster-cidr=10.0.0.0/24 \\
--proxy-mode=ipvs \\
--kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig"

EOF
# Generate the kube-proxy systemd unit
cat <<EOF >/usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF
# Start kube-proxy and enable it at boot
systemctl daemon-reload
systemctl enable kube-proxy
systemctl restart kube-proxy


[root@node01 ~]# bash proxy.sh 192.168.218.151
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-proxy.service to /usr/lib/systemd/system/kube-proxy.service.
[root@node01 ~]# systemctl status kube-proxy
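kube-proxy is started with --proxy-mode=ipvs, which relies on the ip_vs kernel modules; if they are unavailable it will typically fall back to iptables mode. A quick check (ipvsadm is an optional package):

# ip_vs modules currently loaded
lsmod | grep ip_vs
# If ipvsadm is installed, list the virtual servers kube-proxy created
ipvsadm -Ln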

Deploy node02

# Copy the required files from node01
[root@node01 ~]# scp -r /opt/kubernetes/ root@node02:/opt/
[root@node01 ~]# scp /usr/lib/systemd/system/{kubelet,kube-proxy}.service root@node02:/usr/lib/systemd/system/
# On node02
# Delete the copied certificates; node02 must request its own
[root@node02 ~]# cd /opt/kubernetes/ssl/
[root@node02 ssl]# rm -rf *


# Edit the startup parameters
# Mainly change the IP addresses
[root@node02 ssl]# cd ../cfg/
[root@node02 cfg]# vim kubelet
...
--hostname-override=192.168.218.152 \
...
[root@node02 cfg]# vim kubelet.config
...
address: 192.168.218.152
...
[root@node02 cfg]# vim kube-proxy
...
--hostname-override=192.168.218.152 \
...

# Start the services
[root@node02 cfg]# systemctl start kubelet.service
[root@node02 cfg]# systemctl enable kubelet

[root@node02 cfg]# systemctl start kube-proxy
[root@node02 cfg]# systemctl enable kube-proxy


# Check the pending CSR on the master
[root@master01 kubeconfig]# kubectl get csr
NAME                                                   AGE     REQUESTOR           CONDITION
node-csr-0PiRSC0hMBFFCvIx2fihWhaVbcj4U4t1Yjli7C75BWQ   4m48s   kubelet-bootstrap   Pending
# Use the CSR NAME listed above
[root@master01 kubeconfig]# kubectl certificate approve node-csr-0PiRSC0hMBFFCvIx2fihWhaVbcj4U4t1Yjli7C75BWQ
certificatesigningrequest.certificates.k8s.io/node-csr-0PiRSC0hMBFFCvIx2fihWhaVbcj4U4t1Yjli7C75BWQ approved
# Wait a moment
[root@master01 kubeconfig]# kubectl get node
NAME              STATUS   ROLES    AGE    VERSION
192.168.218.151   Ready    <none>   165m   v1.12.3
192.168.218.152   Ready    <none>   14s    v1.12.3
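As a final smoke test, you can schedule a small workload and confirm it lands on one of the nodes; a sketch using the nginx image (any small image works, and on this k8s version kubectl run creates a Deployment):

[root@master01 kubeconfig]# kubectl run nginx --image=nginx --port=80
[root@master01 kubeconfig]# kubectl get pods -o wide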