Binary Deployment of a Single-Master Kubernetes Cluster
Kubernetes Cluster Deployment
Environment planning:
Master: 192.168.30.7/24  kube-apiserver kube-controller-manager kube-scheduler etcd
Node01: 192.168.30.8/24  kubelet kube-proxy docker flannel etcd
Node02: 192.168.30.9/24  kubelet kube-proxy docker flannel etcd
I. Deploy the etcd Database Cluster
1. Install the cfssl certificate tools
[root@master ~]# curl -L https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -o /usr/local/bin/cfssl
[root@master ~]# curl -L https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -o /usr/local/bin/cfssljson
[root@master ~]# curl -L https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -o /usr/local/bin/cfssl-certinfo
[root@master ~]# cd /usr/local/bin/
[root@master bin]# chmod +x *
[root@master bin]# ll
total 18808
-rwxr-xr-x 1 root root 10376657 Jan 16 2020 cfssl
-rwxr-xr-x 1 root root 6595195 Jan 16 2020 cfssl-certinfo
-rwxr-xr-x 1 root root 2277873 Jan 16 2020 cfssljson
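A quick sanity check that the tools are on the PATH and executable (the version string printed depends on the build that was downloaded):
cfssl version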
2. Generate the certificates
[root@master etcd-cert]# pwd
/root/k8s/etcd-cert
[root@master etcd-cert]# cat etcd-cert.sh    // the certificate generation script (the // annotations are explanatory notes, not part of the actual file contents)
cat > ca-config.json <<EOF    // define the CA configuration file
{
"signing": { //键名称
"default": {
"expiry": "87600h" //证书有效期(10年)
},
"profiles": { //简介
"www": { //名称
"expiry": "87600h",
"usages": [ //使用方法
"signing", //键
"key encipherment", //密钥验证
"server auth", //服务器端验证
"client auth" //客户端验证
]
}
}
}
}
EOF
cat > ca-csr.json <<EOF    // define the CA certificate signing request
{
"CN": "etcd CA", //CA签名为etcd指定
"key": {
"algo": "rsa", //使用rsa非对称密钥的形式
"size": 2048 //密钥长度为2048
},
"names": [ //在证书中定义信息
{
"C": "CN",
"L": "Beijing",
"ST": "Beijing"
}
]
}
EOF
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -    // generate the CA, producing ca-key.pem and ca.pem
cat > server-csr.json <<EOF    // define the server certificate signing request
{
"CN": "etcd",
"hosts": [ //定义节点的IP地址
"192.168.30.7",
"192.168.30.8",
"192.168.30.9"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing"
}
]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server    // sign the server certificate against the CA using the server CSR
[root@master etcd-cert]# bash etcd-cert.sh
2021/09/26 22:19:25 [INFO] generating a new CA key and certificate from CSR
2021/09/26 22:19:25 [INFO] generate received request
2021/09/26 22:19:25 [INFO] received CSR
2021/09/26 22:19:25 [INFO] generating key: rsa-2048
2021/09/26 22:19:25 [INFO] encoded CSR
2021/09/26 22:19:25 [INFO] signed certificate with serial number 208309645292718926890373407855697935641908855232
2021/09/26 22:19:25 [INFO] generate received request
2021/09/26 22:19:25 [INFO] received CSR
2021/09/26 22:19:25 [INFO] generating key: rsa-2048
2021/09/26 22:19:25 [INFO] encoded CSR
2021/09/26 22:19:25 [INFO] signed certificate with serial number 493577706729989292028890461217837542719628512047
2021/09/26 22:19:25 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@master etcd-cert]# ls    // the *.pem files are the certificates we need
ca-config.json ca-csr.json ca.pem server.csr server-key.pem
ca.csr ca-key.pem etcd-cert.sh server-csr.json server.pem
[root@master etcd-cert]#
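To double-check that the node IPs really ended up in the server certificate's hosts/SAN list, the certificate can be dumped with cfssl-certinfo (installed earlier):
cfssl-certinfo -cert server.pem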
3. Deploy etcd
3.1 Create the working directories and copy the files
[root@master k8s]# mkdir /opt/etcd/{cfg,bin,ssl} -p    // directories for config files, binaries, and certificates
[root@master k8s]# mv etcd-v3.3.10-linux-amd64/etcd etcd-v3.3.10-linux-amd64/etcdctl /opt/etcd/bin/    // move the binaries into place (the etcd-v3.3.10 tarball is assumed to be unpacked under /root/k8s)
[root@master k8s]# cp etcd-cert/*.pem /opt/etcd/ssl/    // copy the certificates into place
3.2 Review the startup script
[root@master k8s]# cat etcd.sh    // the deployment script (again, the // annotations are explanatory only)
#!/bin/bash
# Usage: ./etcd.sh <etcd name> <current etcd IP> <remaining cluster members and addresses>
# example: ./etcd.sh etcd01 192.168.1.10 etcd02=https://192.168.1.11:2380,etcd03=https://192.168.1.12:2380
ETCD_NAME=$1    // positional parameter 1: etcd node name
ETCD_IP=$2    // positional parameter 2: node address
ETCD_CLUSTER=$3    // positional parameter 3: the rest of the cluster
WORK_DIR=/opt/etcd    // working directory
cat <<EOF >$WORK_DIR/cfg/etcd    // create the etcd configuration file in the working directory
#[Member]
ETCD_NAME="${ETCD_NAME}" //etcd名称
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://${ETCD_IP}:2380" //etcd IP地址:2380端口。用于集群之间通讯
ETCD_LISTEN_CLIENT_URLS="https://${ETCD_IP}:2379" //etcd IP地址:2379端口,用于开放给外部客户端通讯
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://${ETCD_IP}:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://${ETCD_IP}:2379"    // the client URL advertised externally, served over https
ETCD_INITIAL_CLUSTER="etcd01=https://${ETCD_IP}:2380,${ETCD_CLUSTER}"    // the full list of cluster members
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"    // cluster token name: etcd-cluster
ETCD_INITIAL_CLUSTER_STATE="new"    // state: bootstrapping a new cluster
EOF
cat <<EOF >/usr/lib/systemd/system/etcd.service    // create the etcd systemd unit
[Unit]    // unit section
Description=Etcd Server    // describes the etcd service
After=network.target
After=network-online.target
Wants=network-online.target
[Service]    // service section
Type=notify
EnvironmentFile=${WORK_DIR}/cfg/etcd    // location of the etcd environment file
ExecStart=${WORK_DIR}/bin/etcd \    // the start command, with the parameters below
--name=\${ETCD_NAME} \
--data-dir=\${ETCD_DATA_DIR} \
--listen-peer-urls=\${ETCD_LISTEN_PEER_URLS} \
--listen-client-urls=\${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
--advertise-client-urls=\${ETCD_ADVERTISE_CLIENT_URLS} \    // the following are cluster-internal settings
--initial-advertise-peer-urls=\${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
--initial-cluster=\${ETCD_INITIAL_CLUSTER} \
--initial-cluster-token=\${ETCD_INITIAL_CLUSTER_TOKEN} \    // the cluster token is also used for intra-cluster communication, guarding against man-in-the-middle tampering
--initial-cluster-state=new \
--cert-file=${WORK_DIR}/ssl/server.pem \    // certificate-related parameters
--key-file=${WORK_DIR}/ssl/server-key.pem \
--peer-cert-file=${WORK_DIR}/ssl/server.pem \
--peer-key-file=${WORK_DIR}/ssl/server-key.pem \
--trusted-ca-file=${WORK_DIR}/ssl/ca.pem \
--peer-trusted-ca-file=${WORK_DIR}/ssl/ca.pem
Restart=on-failure
LimitNOFILE=65536    // maximum number of open file descriptors
[Install]
WantedBy=multi-user.target    // start in the multi-user target
EOF
systemctl daemon-reload    // reload systemd unit definitions
systemctl enable etcd
systemctl restart etcd
3.3 Run the script and wait for the other nodes to join
[root@master k8s]# bash etcd.sh etcd01 192.168.30.7 etcd02=https://192.168.30.8:2380,etcd03=https://192.168.30.9:2380    // run the script; it generates the config and unit file, then blocks waiting for the other members to join, so the start times out for now
Created symlink from /etc/systemd/system/multi-user.target.wants/etcd.service to /usr/lib/systemd/system/etcd.service.
Job for etcd.service failed because the control process exited with error code. See "systemctl status etcd.service" and "journalctl -xe" for details.
[root@master k8s]# ls /opt/etcd/cfg/
etcd
[root@master k8s]# cat /opt/etcd/cfg/etcd    // review the generated configuration file
#[Member]
ETCD_NAME="etcd01"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.30.7:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.30.7:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.30.7:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.30.7:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://192.168.30.7:2380,etcd02=https://192.168.30.8:2380,etcd03=https://192.168.30.9:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
[root@master k8s]#
3.4 Copy the files to the other nodes and adjust their configuration
[root@master k8s]# scp -r /opt/etcd/ root@192.168.30.8:/opt/    // copy the etcd directory to the node
root@192.168.30.8's password:
etcd 100% 502 579.4KB/s 00:00
etcd 100% 18MB 45.4MB/s 00:00
etcdctl 100% 15MB 109.5MB/s 00:00
ca-key.pem 100% 1679 1.3MB/s 00:00
ca.pem 100% 1265 419.6KB/s 00:00
server-key.pem 100% 1679 1.5MB/s 00:00
server.pem 100% 1338 422.4KB/s 00:00
[root@master k8s]# scp -r /opt/etcd/ root@192.168.30.9:/opt/
root@192.168.30.9's password:
etcd 100% 502 584.3KB/s 00:00
etcd 100% 18MB 61.2MB/s 00:00
etcdctl 100% 15MB 113.6MB/s 00:00
ca-key.pem 100% 1679 1.1MB/s 00:00
ca.pem 100% 1265 448.8KB/s 00:00
server-key.pem 100% 1679 1.2MB/s 00:00
server.pem 100% 1338 643.7KB/s 00:00
[root@master k8s]#
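Note that only /opt/etcd is copied here; for systemctl start etcd to work on the nodes, the unit file generated on the master must also be present there. That step is not captured above, but copying it over is enough, for example:
scp /usr/lib/systemd/system/etcd.service root@192.168.30.8:/usr/lib/systemd/system/
scp /usr/lib/systemd/system/etcd.service root@192.168.30.9:/usr/lib/systemd/system/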
[root@node1 ~]# vim /opt/etcd/cfg/etcd
[root@node1 ~]# cat /opt/etcd/cfg/etcd
#[Member]
ETCD_NAME="etcd02"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.30.8:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.30.8:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.30.8:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.30.8:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://192.168.30.7:2380,etcd02=https://192.168.30.8:2380,etcd03=https://192.168.30.9:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
[root@node1 ~]# systemctl start etcd
[root@node2 ~]# vim /opt/etcd/cfg/etcd
[root@node2 ~]# cat /opt/etcd/cfg/etcd
#[Member]
ETCD_NAME="etcd03"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.30.9:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.30.9:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.30.9:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.30.9:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://192.168.30.7:2380,etcd02=https://192.168.30.8:2380,etcd03=https://192.168.30.9:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
[root@node2 ~]# systemctl start etcd
3.5 Start etcd and check the cluster health
[root@master k8s]# bash etcd.sh etcd01 192.168.30.7 etcd02=https://192.168.30.8:2380,etcd03=https://192.168.30.9:2380    // re-run on the master now that the other members are up
[root@master k8s]# cd etcd-cert/
[root@master etcd-cert]# /opt/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://192.168.30.7:2379,https://192.168.30.8:2379,https://192.168.30.9:2379" cluster-health
member f05d004bd02940f is healthy: got healthy result from https://192.168.30.7:2379
member 85d13a9cc78ad7df is healthy: got healthy result from https://192.168.30.9:2379
member c9b6b0b860128c0f is healthy: got healthy result from https://192.168.30.8:2379
cluster is healthy
[root@master etcd-cert]#
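As a further check, the same etcdctl (v2 API) can list the cluster members with identical TLS flags:
/opt/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://192.168.30.7:2379,https://192.168.30.8:2379,https://192.168.30.9:2379" member list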
II. Deploy Docker on the Nodes
1. Install Docker on all nodes
[root@node1 ~]# yum install -y yum-utils device-mapper-persistent-data lvm2
Loaded plugins: fastestmirror, langpacks
Loading mirror speeds from cached hostfile
* base: mirrors.aliyun.com
* extras: mirrors.aliyun.com
* updates: mirrors.aliyun.com
Package yum-utils-1.1.31-54.el7_8.noarch already installed and latest version
Package device-mapper-persistent-data-0.8.5-3.el7_9.2.x86_64 already installed and latest version
Package 7:lvm2-2.02.187-6.el7_9.5.x86_64 already installed and latest version
Nothing to do
[root@node1 ~]# cd /etc/yum.repos.d/
[root@node1 yum.repos.d]# yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
Loaded plugins: fastestmirror, langpacks
adding repo from: https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
grabbing file https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo to /etc/yum.repos.d/docker-ce.repo
repo saved to /etc/yum.repos.d/docker-ce.repo
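The capture above only adds the repository; the actual Docker CE installation and service start-up are not shown. On each node, a typical follow-up would be:
yum install -y docker-ce
systemctl start docker
systemctl enable docker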
2. Configure a registry mirror
[root@localhost ~]# sudo mkdir -p /etc/docker
[root@localhost ~]# sudo tee /etc/docker/daemon.json <<-'EOF'
> {
> "registry-mirrors": ["https://xxxxxx.mirror.aliyuncs.com"]
> }
> EOF
[root@localhost ~]# sudo systemctl daemon-reload
[root@localhost ~]# sudo systemctl restart docker
[root@localhost ~]# cat /etc/docker/daemon.json
{
"registry-mirrors": ["https://xxxxxx.mirror.aliyuncs.com"]
}
III. Deploy the Flannel Container Network
1. Write the allocated subnet into etcd for flannel to use
[root@master etcd-cert]# /opt/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://192.168.30.7:2379,https://192.168.30.8:2379,https://192.168.30.9:2379" set /coreos.com/network/config '{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}'
{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}
[root@master etcd-cert]# /opt/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://192.168.30.7:2379,https://192.168.30.8:2379,https://192.168.30.9:2379" get /coreos.com/network/config    // verify the written configuration
{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}
[root@master etcd-cert]#
2. Copy the flannel package to the nodes
[root@master k8s]# scp flannel-v0.10.0-linux-amd64.tar.gz root@192.168.30.8:/root
root@192.168.30.8's password:
flannel-v0.10.0-linux-amd64.tar.gz 100% 9479KB 102.0MB/s 00:00
[root@master k8s]# scp flannel-v0.10.0-linux-amd64.tar.gz root@192.168.30.9:/root
root@192.168.30.9's password:
flannel-v0.10.0-linux-amd64.tar.gz 100% 9479KB 43.6MB/s 00:00
[root@master k8s]#
[root@node1 ~]# tar xf flannel-v0.10.0-linux-amd64.tar.gz    // unpack the uploaded archive
[root@node1 ~]# mkdir /opt/kubernetes/{cfg,bin,ssl} -p    // create the kubernetes working directories
[root@node1 ~]# mv mk-docker-opts.sh flanneld /opt/kubernetes/bin/    // move the binaries into place
3. Configuration file and startup script
[root@node1 ~]# cat /opt/kubernetes/cfg/flanneld    // the configuration file
FLANNEL_OPTIONS="--etcd-endpoints=https://192.168.30.7:2379,https://192.168.30.8:2379,https://192.168.30.9:2379 -etcd-cafile=/opt/etcd/ssl/ca.pem -etcd-certfile=/opt/etcd/ssl/server.pem -etcd-keyfile=/opt/etcd/ssl/server-key.pem"
[root@node1 ~]# cat /usr/lib/systemd/system/flanneld.service    // the systemd unit
[Unit]
Description=Flanneld overlay address etcd agent
After=network-online.target network.target
Before=docker.service
[Service]
Type=notify
EnvironmentFile=/opt/kubernetes/cfg/flanneld
ExecStart=/opt/kubernetes/bin/flanneld --ip-masq $FLANNEL_OPTIONS
ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure
[Install]
WantedBy=multi-user.target
[root@node1 ~]#
4. Start flannel
[root@node1 cfg]# systemctl start flanneld
[root@node1 cfg]# systemctl daemon-reload
[root@node1 cfg]# systemctl enable flanneld
[root@node1 cfg]# systemctl restart flanneld
5. Configure Docker to use the flannel network
[root@node1 cfg]# vim /usr/lib/systemd/system/docker.service
[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
EnvironmentFile=/run/flannel/subnet.env
ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS -H fd:// --containerd=/run/containerd/containerd.sock
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
6. Restart Docker
[root@node1 cfg]# cat /run/flannel/subnet.env
DOCKER_OPT_BIP="--bip=172.17.66.1/24"
DOCKER_OPT_IPMASQ="--ip-masq=false"
DOCKER_OPT_MTU="--mtu=1450"
DOCKER_NETWORK_OPTIONS=" --bip=172.17.66.1/24 --ip-masq=false --mtu=1450"
[root@node1 cfg]# systemctl daemon-reload
[root@node1 cfg]# systemctl restart docker
[root@node1 cfg]#
7. Inspect the flannel network
[root@node1 cfg]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:8c:7d:09 brd ff:ff:ff:ff:ff:ff
inet 192.168.30.8/24 brd 192.168.30.255 scope global ens33
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:fe8c:7d09/64 scope link
valid_lft forever preferred_lft forever
3: virbr0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default qlen 1000
link/ether 52:54:00:fb:7f:18 brd ff:ff:ff:ff:ff:ff
inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
valid_lft forever preferred_lft forever
4: virbr0-nic: <BROADCAST,MULTICAST> mtu 1500 qdisc pfifo_fast master virbr0 state DOWN group default qlen 1000
link/ether 52:54:00:fb:7f:18 brd ff:ff:ff:ff:ff:ff
5: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
link/ether 02:42:c2:de:67:7a brd ff:ff:ff:ff:ff:ff
inet 172.17.66.1/24 brd 172.17.66.255 scope global docker0
valid_lft forever preferred_lft forever
6: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group default
link/ether 2a:43:50:e5:70:14 brd ff:ff:ff:ff:ff:ff
inet 172.17.66.0/32 scope global flannel.1
valid_lft forever preferred_lft forever
inet6 fe80::2843:50ff:fee5:7014/64 scope link
valid_lft forever preferred_lft forever
[root@node1 cfg]# ping 172.17.64.1
PING 172.17.64.1 (172.17.64.1) 56(84) bytes of data.
64 bytes from 172.17.64.1: icmp_seq=1 ttl=64 time=0.669 ms
64 bytes from 172.17.64.1: icmp_seq=2 ttl=64 time=0.451 ms
^C
--- 172.17.64.1 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1001ms
rtt min/avg/max/mdev = 0.451/0.560/0.669/0.109 ms
[root@node1 cfg]#
[root@node2 cfg]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:0c:53:27 brd ff:ff:ff:ff:ff:ff
inet 192.168.30.9/24 brd 192.168.30.255 scope global ens33
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:fe0c:5327/64 scope link
valid_lft forever preferred_lft forever
3: virbr0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default qlen 1000
link/ether 52:54:00:1b:0e:bb brd ff:ff:ff:ff:ff:ff
inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
valid_lft forever preferred_lft forever
4: virbr0-nic: <BROADCAST,MULTICAST> mtu 1500 qdisc pfifo_fast master virbr0 state DOWN group default qlen 1000
link/ether 52:54:00:1b:0e:bb brd ff:ff:ff:ff:ff:ff
5: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
link/ether 02:42:e1:90:dc:0b brd ff:ff:ff:ff:ff:ff
inet 172.17.64.1/24 brd 172.17.64.255 scope global docker0
valid_lft forever preferred_lft forever
6: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group default
link/ether d2:11:41:cb:c5:55 brd ff:ff:ff:ff:ff:ff
inet 172.17.64.0/32 scope global flannel.1
valid_lft forever preferred_lft forever
inet6 fe80::d011:41ff:fecb:c555/64 scope link
valid_lft forever preferred_lft forever
[root@node2 cfg]# ping 172.17.66.1
PING 172.17.66.1 (172.17.66.1) 56(84) bytes of data.
64 bytes from 172.17.66.1: icmp_seq=1 ttl=64 time=0.366 ms
64 bytes from 172.17.66.1: icmp_seq=2 ttl=64 time=0.464 ms
^C
--- 172.17.66.1 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 999ms
rtt min/avg/max/mdev = 0.366/0.415/0.464/0.049 ms
[root@node2 cfg]#
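The pings above are host-to-gateway; to confirm that containers themselves can talk across nodes, one option is to start a throwaway container on one node and ping the other side's docker0 gateway (busybox here is just an illustrative image):
docker run -it --rm busybox sh
/ # ping -c 2 172.17.64.1    # from a container on node1 to node2's docker0 gateway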
IV. Deploy the Master Components
1. Generate the Kubernetes certificates
[root@master k8s-cert]# cat k8s-cert.sh
cat > ca-config.json <<EOF
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"kubernetes": {
"expiry": "87600h",
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
EOF
cat > ca-csr.json <<EOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "Beijing",
"ST": "Beijing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
#-----------------------
cat > server-csr.json <<EOF
{
"CN": "kubernetes",
"hosts": [
"10.0.0.1",
"127.0.0.1",
"192.168.30.7",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
#-----------------------
cat > admin-csr.json <<EOF
{
"CN": "admin",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "system:masters",
"OU": "System"
}
]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
#-----------------------
cat > kube-proxy-csr.json <<EOF
{
"CN": "system:kube-proxy",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
[root@master k8s-cert]# bash k8s-cert.sh
2021/09/27 00:08:22 [INFO] generating a new CA key and certificate from CSR
2021/09/27 00:08:22 [INFO] generate received request
2021/09/27 00:08:22 [INFO] received CSR
2021/09/27 00:08:22 [INFO] generating key: rsa-2048
2021/09/27 00:08:22 [INFO] encoded CSR
2021/09/27 00:08:22 [INFO] signed certificate with serial number 478360010460524378597942783674963208253320007077
2021/09/27 00:08:22 [INFO] generate received request
2021/09/27 00:08:22 [INFO] received CSR
2021/09/27 00:08:22 [INFO] generating key: rsa-2048
2021/09/27 00:08:22 [INFO] encoded CSR
2021/09/27 00:08:22 [INFO] signed certificate with serial number 386202515578783260209642708627500626972309043182
2021/09/27 00:08:22 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
2021/09/27 00:08:22 [INFO] generate received request
2021/09/27 00:08:22 [INFO] received CSR
2021/09/27 00:08:22 [INFO] generating key: rsa-2048
2021/09/27 00:08:22 [INFO] encoded CSR
2021/09/27 00:08:23 [INFO] signed certificate with serial number 590076051492728149633684308998359216491752482167
2021/09/27 00:08:23 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
2021/09/27 00:08:23 [INFO] generate received request
2021/09/27 00:08:23 [INFO] received CSR
2021/09/27 00:08:23 [INFO] generating key: rsa-2048
2021/09/27 00:08:23 [INFO] encoded CSR
2021/09/27 00:08:23 [INFO] signed certificate with serial number 367870846720115626064994072225779099959546576157
2021/09/27 00:08:23 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@master k8s-cert]# ls
admin.csr admin.pem ca-csr.json k8s-cert.sh kube-proxy-key.pem server-csr.json
admin-csr.json ca-config.json ca-key.pem kube-proxy.csr kube-proxy.pem server-key.pem
admin-key.pem ca.csr ca.pem kube-proxy-csr.json server.csr server.pem
[root@master k8s-cert]# ls *.pem
admin-key.pem admin.pem ca-key.pem ca.pem kube-proxy-key.pem kube-proxy.pem server-key.pem server.pem
2. Copy the certificates to the working directory
[root@master k8s-cert]# cp ca*pem server*pem /opt/kubernetes/ssl/
[root@master k8s-cert]# ls /opt/kubernetes/ssl/
ca-key.pem ca.pem server-key.pem server.pem
[root@master k8s-cert]#
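The copy above assumes /opt/kubernetes/{cfg,bin,ssl} already exists on the master; its creation is not captured anywhere, so if needed create it first the same way as on the nodes:
mkdir -p /opt/kubernetes/{cfg,bin,ssl}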
3. Deploy kube-apiserver
[root@master k8s]# tar xf kubernetes-server-linux-amd64.tar.gz
[root@master k8s]# cd kubernetes/server/bin/
[root@master bin]# cp kube-apiserver kubectl kube-controller-manager kube-scheduler /opt/kubernetes/bin/
4. Create the token file
[root@master bin]# cd /root/k8s/
[root@master k8s]# head -c 16 /dev/urandom | od -An -t x | tr -d ' '
f927b11bbf05b79d2253029c39544bea
[root@master k8s]# vim /opt/kubernetes/cfg/token.csv
[root@master k8s]# cat /opt/kubernetes/cfg/token.csv
f927b11bbf05b79d2253029c39544bea,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
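The token value and the file can also be produced in one go; the format is token,user,uid,"group" as shown above, and the same token must later be used in bootstrap.kubeconfig:
BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat > /opt/kubernetes/cfg/token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF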
5. Start kube-apiserver
[root@master k8s]# bash apiserver.sh 192.168.30.7 https://192.168.30.7:2379,https://192.168.30.8:2379,https://192.168.30.9:2379
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-apiserver.service to /usr/lib/systemd/system/kube-apiserver.service.
[root@master k8s]# ps aux | grep kube
root 27437 91.7 5.4 412280 324200 ? Ssl 10:55 0:16 /opt/kubernetes/bin/kube-apiserver --logtostderr=true --v=4 --etcd-servers=https://192.168.30.7:2379,https://192.168.30.8:2379,https://192.168.30.9:2379 --bind-address=192.168.30.7 --secure-port=6443 --advertise-address=192.168.30.7 --allow-privileged=true --service-cluster-ip-range=10.0.0.0/24 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction --authorization-mode=RBAC,Node --kubelet-https=true --enable-bootstrap-token-auth --token-auth-file=/opt/kubernetes/cfg/token.csv --service-node-port-range=30000-50000 --tls-cert-file=/opt/kubernetes/ssl/server.pem --tls-private-key-file=/opt/kubernetes/ssl/server-key.pem --client-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem --etcd-cafile=/opt/etcd/ssl/ca.pem --etcd-certfile=/opt/etcd/ssl/server.pem --etcd-keyfile=/opt/etcd/ssl/server-key.pem
root 27464 0.0 0.0 112724 980 pts/1 R+ 10:55 0:00 grep --color=auto kube
[root@master k8s]#
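The apiserver.sh script itself is not reproduced in this capture. Judging from the kube-apiserver command line in the ps output above, a minimal sketch of what it writes could look like the following; the flag values are taken from that output, while the systemd unit body is an assumption:
#!/bin/bash
MASTER_ADDRESS=$1
ETCD_SERVERS=$2
cat <<EOF >/opt/kubernetes/cfg/kube-apiserver
KUBE_APISERVER_OPTS="--logtostderr=true \\
--v=4 \\
--etcd-servers=${ETCD_SERVERS} \\
--bind-address=${MASTER_ADDRESS} \\
--secure-port=6443 \\
--advertise-address=${MASTER_ADDRESS} \\
--allow-privileged=true \\
--service-cluster-ip-range=10.0.0.0/24 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--kubelet-https=true \\
--enable-bootstrap-token-auth \\
--token-auth-file=/opt/kubernetes/cfg/token.csv \\
--service-node-port-range=30000-50000 \\
--tls-cert-file=/opt/kubernetes/ssl/server.pem \\
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \\
--client-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--etcd-cafile=/opt/etcd/ssl/ca.pem \\
--etcd-certfile=/opt/etcd/ssl/server.pem \\
--etcd-keyfile=/opt/etcd/ssl/server-key.pem"
EOF
cat <<EOF >/usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-apiserver
ExecStart=/opt/kubernetes/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl restart kube-apiserver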
6. Start kube-scheduler
[root@master k8s]# ./scheduler.sh
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-scheduler.service to /usr/lib/systemd/system/kube-scheduler.service.
[root@master k8s]# netstat -antp | grep sch
tcp 0 0 127.0.0.1:38152 127.0.0.1:8080 ESTABLISHED 27559/kube-schedule
tcp 0 0 127.0.0.1:38162 127.0.0.1:8080 ESTABLISHED 27559/kube-schedule
tcp 0 0 127.0.0.1:38150 127.0.0.1:8080 ESTABLISHED 27559/kube-schedule
tcp 0 0 127.0.0.1:38160 127.0.0.1:8080 ESTABLISHED 27559/kube-schedule
tcp 0 0 127.0.0.1:38140 127.0.0.1:8080 ESTABLISHED 27559/kube-schedule
tcp 0 0 127.0.0.1:38144 127.0.0.1:8080 ESTABLISHED 27559/kube-schedule
tcp 0 0 127.0.0.1:38158 127.0.0.1:8080 ESTABLISHED 27559/kube-schedule
tcp 0 0 127.0.0.1:38148 127.0.0.1:8080 ESTABLISHED 27559/kube-schedule
tcp 0 0 127.0.0.1:38146 127.0.0.1:8080 ESTABLISHED 27559/kube-schedule
tcp 0 0 127.0.0.1:38136 127.0.0.1:8080 ESTABLISHED 27559/kube-schedule
tcp 0 0 127.0.0.1:38154 127.0.0.1:8080 ESTABLISHED 27559/kube-schedule
tcp 0 0 127.0.0.1:38134 127.0.0.1:8080 ESTABLISHED 27559/kube-schedule
tcp6 0 0 :::10251 :::* LISTEN 27559/kube-schedule
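scheduler.sh is likewise not shown in the capture. Given that the scheduler listens on 10251 and connects to the local apiserver on 127.0.0.1:8080, a typical version of the script is sketched below; treat it as an assumption rather than the author's exact file (controller-manager.sh in the next step follows the same pattern with its own flags and certificate paths):
#!/bin/bash
MASTER_ADDRESS=${1:-"127.0.0.1"}
cat <<EOF >/opt/kubernetes/cfg/kube-scheduler
KUBE_SCHEDULER_OPTS="--logtostderr=true \\
--v=4 \\
--master=${MASTER_ADDRESS}:8080 \\
--leader-elect"
EOF
cat <<EOF >/usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-scheduler
ExecStart=/opt/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kube-scheduler
systemctl restart kube-scheduler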
7. Start kube-controller-manager
[root@master k8s]# bash controller-manager.sh 127.0.0.1
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-controller-manager.service to /usr/lib/systemd/system/kube-controller-manager.service.
[root@master k8s]# netstat -antp | grep contr
tcp 0 0 127.0.0.1:10252 0.0.0.0:* LISTEN 27683/kube-controll
tcp 0 0 127.0.0.1:38566 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38578 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38354 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38554 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38532 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38614 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38552 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38544 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38600 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38588 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38598 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38480 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38630 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38568 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38594 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38528 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38562 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38530 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38636 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38602 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38632 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38622 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38546 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38584 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38540 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38628 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38638 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38640 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38556 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38548 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38606 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38616 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38618 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38542 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38576 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38634 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38372 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38586 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38596 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38590 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38536 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38534 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38620 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38624 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38564 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38574 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp 0 0 127.0.0.1:38569 127.0.0.1:8080 ESTABLISHED 27683/kube-controll
tcp6 0 0 :::10257 :::* LISTEN 27683/kube-controll
[root@master k8s]#
8. Check the master component status
[root@master k8s]# /opt/kubernetes/bin/kubectl get cs
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-0 Healthy {"health":"true"}
etcd-1 Healthy {"health":"true"}
etcd-2 Healthy {"health":"true"}
V. Deploy the Node Components
1. Copy the binaries to the nodes
[root@master bin]# cd /root/k8s/kubernetes/server/bin/
[root@master bin]# ls
apiextensions-apiserver kube-apiserver.docker_tag kube-proxy
cloud-controller-manager kube-apiserver.tar kube-proxy.docker_tag
cloud-controller-manager.docker_tag kube-controller-manager kube-proxy.tar
cloud-controller-manager.tar kube-controller-manager.docker_tag kube-scheduler
hyperkube kube-controller-manager.tar kube-scheduler.docker_tag
kubeadm kubectl kube-scheduler.tar
kube-apiserver kubelet mounter
[root@master bin]# scp kubelet kube-proxy root@192.168.30.8:/opt/kubernetes/bin/
root@192.168.30.8's password:
kubelet 100% 168MB 58.6MB/s 00:02
kube-proxy 100% 48MB 48.6MB/s 00:00
[root@master bin]# scp kubelet kube-proxy root@192.168.30.9:/opt/kubernetes/bin/
root@192.168.30.9's password:
kubelet 100% 168MB 50.3MB/s 00:03
kube-proxy 100% 48MB 48.2MB/s 00:00
[root@master bin]#
2. Set the kubeconfig parameters
[root@master ssl]# kubectl config set-cluster kubernetes --certificate-authority=./ca.pem --embed-certs=true --server=https://192.168.30.7:6443 --kubeconfig=bootstrap.kubeconfig    // set the cluster parameters
Cluster "kubernetes" set.
[root@master ~]# kubectl config set-credentials kubelet-bootstrap --token=f927b11bbf05b79d2253029c39544bea --kubeconfig=bootstrap.kubeconfig    // set the client authentication parameters
User "kubelet-bootstrap" set.
[root@master ssl]# kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=bootstrap.kubeconfig    // set the context parameters
[root@master ssl]#
[root@master ssl]# kubectl config use-context default --kubeconfig=bootstrap.kubeconfig    // switch to the default context
Switched to context "default".
3. Generate the kubeconfig files with the kubeconfig script
[root@master ~]# kubectl config set-credentials kubelet-bootstrap \
> --token=f927b11bbf05b79d2253029c39544bea \
> --kubeconfig=bootstrap.kubeconfig
User "kubelet-bootstrap" set.
[root@master ~]# cd /root/k8s/kubeconfig/
[root@master kubeconfig]# bash kubeconfig 192.168.30.7 /root/k8s/k8s-cert/
Cluster "kubernetes" set.
User "kubelet-bootstrap" set.
Context "default" created.
Switched to context "default".
Cluster "kubernetes" set.
User "kube-proxy" set.
Context "default" created.
Switched to context "default".
[root@master kubeconfig]# ls
bootstrap.kubeconfig kubeconfig kube-proxy.kubeconfig
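The kubeconfig script run above is not listed in the capture. Based on the individual commands shown in step 2 and the messages it printed (cluster, user and context set for two files), a plausible sketch looks like this; the BOOTSTRAP_TOKEN value must match /opt/kubernetes/cfg/token.csv, and the kube-proxy entry reuses the kube-proxy client certificate generated earlier:
#!/bin/bash
# usage: bash kubeconfig <APISERVER_IP> <SSL_DIR>
APISERVER=$1
SSL_DIR=$2
BOOTSTRAP_TOKEN=f927b11bbf05b79d2253029c39544bea
KUBE_APISERVER="https://${APISERVER}:6443"
# bootstrap.kubeconfig, used by kubelet for TLS bootstrapping
kubectl config set-cluster kubernetes --certificate-authority=${SSL_DIR}/ca.pem --embed-certs=true --server=${KUBE_APISERVER} --kubeconfig=bootstrap.kubeconfig
kubectl config set-credentials kubelet-bootstrap --token=${BOOTSTRAP_TOKEN} --kubeconfig=bootstrap.kubeconfig
kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=bootstrap.kubeconfig
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
# kube-proxy.kubeconfig, authenticated with the kube-proxy client certificate
kubectl config set-cluster kubernetes --certificate-authority=${SSL_DIR}/ca.pem --embed-certs=true --server=${KUBE_APISERVER} --kubeconfig=kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy --client-certificate=${SSL_DIR}/kube-proxy.pem --client-key=${SSL_DIR}/kube-proxy-key.pem --embed-certs=true --kubeconfig=kube-proxy.kubeconfig
kubectl config set-context default --cluster=kubernetes --user=kube-proxy --kubeconfig=kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig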
4. Copy the kubeconfig files to the nodes
[root@master kubeconfig]# scp bootstrap.kubeconfig kube-proxy.kubeconfig root@192.168.30.8:/opt/kubernetes/cfg/
root@192.168.30.8's password:
bootstrap.kubeconfig 100% 2125 1.4MB/s 00:00
kube-proxy.kubeconfig 100% 6272 4.5MB/s 00:00
[root@master kubeconfig]# scp bootstrap.kubeconfig kube-proxy.kubeconfig root@192.168.30.9:/opt/kubernetes/cfg/
root@192.168.30.9's password:
bootstrap.kubeconfig 100% 2125 1.7MB/s 00:00
kube-proxy.kubeconfig 100% 6272 3.3MB/s 00:00
[root@master kubeconfig]#
5. Create the bootstrap role binding that allows kubelet to connect to the apiserver
[root@master kubeconfig]# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
clusterrolebinding.rbac.authorization.k8s.io/kubelet-bootstrap created
6. Start kubelet
[root@node1 ~]# cat kubelet.sh
#!/bin/bash
NODE_ADDRESS=$1
DNS_SERVER_IP=${2:-"10.0.0.2"}
cat <<EOF >/opt/kubernetes/cfg/kubelet
KUBELET_OPTS="--logtostderr=true \\
--v=4 \\
--hostname-override=${NODE_ADDRESS} \\
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\
--config=/opt/kubernetes/cfg/kubelet.config \\
--cert-dir=/opt/kubernetes/ssl \\
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
EOF
cat <<EOF >/opt/kubernetes/cfg/kubelet.config
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: ${NODE_ADDRESS}
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
- ${DNS_SERVER_IP}
clusterDomain: cluster.local.
failSwapOn: false
authentication:
  anonymous:
    enabled: true
EOF
cat <<EOF >/usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kubelet
ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
KillMode=process
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kubelet
systemctl restart kubelet
[root@node1 ~]#
[root@node1 ~]# bash kubelet.sh 192.168.30.8
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
[root@node1 ~]# ps aux | grep kube
root 21129 0.0 0.5 604444 20340 ? Ssl Sep26 0:31 /opt/kubernetes/bin/flanneld --ip-masq --etcd-endpoints=https://192.168.30.7:2379,https://192.168.30.8:2379,https://192.168.30.9:2379 -etcd-cafile=/opt/etcd/ssl/ca.pem -etcd-certfile=/opt/etcd/ssl/server.pem -etcd-keyfile=/opt/etcd/ssl/server-key.pem
root 80533 0.0 0.0 112724 984 pts/0 S+ 11:31 0:00 grep --color=auto kube
[root@node1 ~]#
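node2 goes through the same procedure with its own address, assuming kubelet.sh was copied there as well:
bash kubelet.sh 192.168.30.9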
[root@master kubeconfig]# kubectl get csr
NAME AGE REQUESTOR CONDITION
node-csr-unFJhI0acyf-laiiFytSad8ygVbgCXEoFFFUYtCYius 18s system:anonymous Approved,Issued
node-csr-2rvWyWoKlD8If3dTQpA2Dbt9W1ETGTFX_1lezVWX0E0 15s system:anonymous Approved,Issued
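The approval step itself is not captured here; if the CSRs were still Pending they would be approved by name from the master, for example:
kubectl certificate approve node-csr-unFJhI0acyf-laiiFytSad8ygVbgCXEoFFFUYtCYius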
[root@master ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
192.168.30.8 Ready <none> 41m v1.12.3
192.168.30.9 Ready <none> 14s v1.12.3
7. Start kube-proxy
[root@node1 ~]# cat proxy.sh
#!/bin/bash
NODE_ADDRESS=$1
cat <<EOF >/opt/kubernetes/cfg/kube-proxy
KUBE_PROXY_OPTS="--logtostderr=true \\
--v=4 \\
--hostname-override=${NODE_ADDRESS} \\
--cluster-cidr=10.0.0.0/24 \\
--proxy-mode=ipvs \\
--kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig"
EOF
cat <<EOF >/usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Proxy
After=network.target
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kube-proxy
systemctl restart kube-proxy
[root@node1 ~]#
[root@node1 ~]# bash proxy.sh 192.168.30.8
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-proxy.service to /usr/lib/systemd/system/kube-proxy.service.
[root@node1 ~]# systemctl status kube-proxy.service
● kube-proxy.service - Kubernetes Proxy
Loaded: loaded (/usr/lib/systemd/system/kube-proxy.service; enabled; vendor preset: disabled)
Active: active (running) since Mon 2021-09-27 13:05:53 CST; 1min 44s ago
Main PID: 99418 (kube-proxy)
Tasks: 0
Memory: 8.1M
CGroup: /system.slice/kube-proxy.service
‣ 99418 /opt/kubernetes/bin/kube-proxy --logtostderr=true --v=4 --hostname-override=192.168.30.8 --cluster-cidr=10.0.0.0/24 --proxy-mo...
Sep 27 13:07:29 node1 kube-proxy[99418]: I0927 13:07:29.319392 99418 config.go:141] Calling handler.OnEndpointsUpdate
Sep 27 13:07:29 node1 kube-proxy[99418]: I0927 13:07:29.892228 99418 config.go:141] Calling handler.OnEndpointsUpdate
Sep 27 13:07:31 node1 kube-proxy[99418]: I0927 13:07:31.327885 99418 config.go:141] Calling handler.OnEndpointsUpdate
Sep 27 13:07:31 node1 kube-proxy[99418]: I0927 13:07:31.900833 99418 config.go:141] Calling handler.OnEndpointsUpdate
Sep 27 13:07:33 node1 kube-proxy[99418]: I0927 13:07:33.340690 99418 config.go:141] Calling handler.OnEndpointsUpdate
Sep 27 13:07:33 node1 kube-proxy[99418]: I0927 13:07:33.910180 99418 config.go:141] Calling handler.OnEndpointsUpdate
Sep 27 13:07:35 node1 kube-proxy[99418]: I0927 13:07:35.350181 99418 config.go:141] Calling handler.OnEndpointsUpdate
Sep 27 13:07:35 node1 kube-proxy[99418]: I0927 13:07:35.918789 99418 config.go:141] Calling handler.OnEndpointsUpdate
Sep 27 13:07:37 node1 kube-proxy[99418]: I0927 13:07:37.360281 99418 config.go:141] Calling handler.OnEndpointsUpdate
Sep 27 13:07:37 node1 kube-proxy[99418]: I0927 13:07:37.928068 99418 config.go:141] Calling handler.OnEndpointsUpdate
[root@node1 ~]#
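As with kubelet, the same script is run on node2 with its own address:
bash proxy.sh 192.168.30.9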
VI. Deploy a Test Instance
1. Create the instance
[root@master ~]# kubectl run nginx --image=nginx --replicas=3    // create a deployment with three pod replicas
kubectl run --generator=deployment/apps.v1beta1 is DEPRECATED and will be removed in a future version. Use kubectl create instead.
deployment.apps/nginx created
[root@master ~]# kubectl expose deployment nginx --port=88 --target-port=80 --type=NodePort    // expose the deployment as a NodePort service
service/nginx exposed
[root@master ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-dbddb74b8-ht9ks 0/1 ContainerCreating 0 21s
nginx-dbddb74b8-j7hmj 0/1 ContainerCreating 0 21s
nginx-dbddb74b8-x8lwp 0/1 ContainerCreating 0 21s
[root@master ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-dbddb74b8-ht9ks 1/1 Running 0 113s
nginx-dbddb74b8-j7hmj 1/1 Running 0 113s
nginx-dbddb74b8-x8lwp 1/1 Running 0 113s
[root@master ~]# kubectl get pod -o wide    // see which node each pod is running on
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE
nginx-dbddb74b8-ht9ks 1/1 Running 0 3m51s 172.17.66.3 192.168.30.8 <none>
nginx-dbddb74b8-j7hmj 1/1 Running 0 3m51s 172.17.66.2 192.168.30.8 <none>
nginx-dbddb74b8-x8lwp 1/1 Running 0 3m51s 172.17.64.2 192.168.30.9 <none>
[root@master ~]#
2. Test
[root@node1 ~]# curl 172.17.66.3
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>
<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>
<p><em>Thank you for using nginx.</em></p>
</body>
</html>
[root@node1 ~]# curl 172.17.66.2
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>
<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>
<p><em>Thank you for using nginx.</em></p>
</body>
</html>
[root@node1 ~]# curl 172.17.64.2
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>
<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>
<p><em>Thank you for using nginx.</em></p>
</body>
</html>
[root@node1 ~]#
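The curls above hit the pod IPs directly from a node. Since the service was exposed as a NodePort, it can also be tested from outside the pod network; the actual port is whatever kubectl reports in the 30000-50000 range (shown here as a placeholder):
kubectl get svc nginx
curl http://192.168.30.8:<NodePort>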