k8s多节点部署(续单节点部署)
下文的多节点部署是在单节点部署的基础上扩展,加入一个master,这样k8s集群中便有两个master,同时为两个master设置负载均衡,运行keepalived实例,虚拟IP对于node节点来说则是master的IP,需要去修改node节点指向的masterIP。
下文的多节点部署是在单节点部署的基础上扩展,加入一个master,这样k8s集群中便有两个master,同时为两个master设置负载均衡,运行keepalived实例,虚拟IP对于node节点来说则是master的IP,需要去修改node节点指向的masterIP
在单节点部署基础上加入以下节点
主机名 | IP地址 | 备注 |
---|---|---|
master02 | 192.168.218.142 | master02 |
nginx01 | 192.168.218.131 | LB的master,运行nginx和keepalived |
nginx02 | 192.168.218.132 | LB的backup,运行nginx和keepalived |
初始化
# 所有主机进行初始化
# 添加主机名解析,注意master01、node01、node02需添加新的主机名解析
vim /etc/hosts
192.168.218.141 master01
192.168.218.142 master02
192.168.218.151 node01
192.168.218.152 node02
192.168.218.131 nginx01
192.168.218.132 nginx02
# 设置时区
vim /etc/profile
TZ='Asia/Shanghai'
export TZ
source /etc/profile
# 同步时间
ntpdate ntp.aliyun.com
# 关闭防火墙
systemctl stop firewalld
systemctl disable firewalld
setenforce 0
在单节点的master01上将相关文件复制到master02上
# master01
# 复制kubernetes目录到master02
[root@master01 k8s]# scp -r /opt/kubernetes/ root@master02:/opt/
# 复制master中的三个组件启动脚本
[root@master01 k8s]# scp /usr/lib/systemd/system/{kube-apiserver,kube-controller-manager,kube-scheduler}.service root@master02:/usr/lib/systemd/system/
# master02
[root@master02 ~]# cd /opt/kubernetes/cfg/
# 修改apiserver启动参数
[root@master02 cfg]# vim kube-apiserver
...
--bind-address=192.168.218.142 \
--secure-port=6443 \
--advertise-address=192.168.218.142 \
...
# master01
# 特别注意:master02一定要有etcd证书
# 需要拷贝master01上已有的etcd证书给master02使用
[root@master01 k8s]# scp -r /opt/etcd/ root@master02:/opt/
# master02
# 启动master服务
[root@master02 cfg]# systemctl start kube-apiserver.service
[root@master02 cfg]# systemctl start kube-controller-manager.service
[root@master02 cfg]# systemctl start kube-scheduler.service
# 添加环境变量
[root@master02 cfg]# echo "export PATH=$PATH:/opt/kubernetes/bin/" >> /etc/profile
[root@master02 cfg]# source /etc/profile
[root@master02 cfg]# kubectl get node
NAME STATUS ROLES AGE VERSION
192.168.218.151 Ready <none> 3h10m v1.12.3
192.168.218.152 Ready <none> 24m v1.12.3
# 设置master服务开机自启动
[root@master02 cfg]# systemctl enable kube-apiserver.service
[root@master02 cfg]# systemctl enable kube-controller-manager.service
[root@master02 cfg]# systemctl enable kube-scheduler.service
负载均衡
nginx
上传install_nginx.sh nginx-1.12.2.tar.gz
nginx安装脚本见脚本安装LNMP
# 安装nginx
bash install_nginx.sh
# 配置nginx服务脚本
vim /lib/systemd/system/nginx.service
# systemd unit for the source-built nginx (install prefix: /usr/local/nginx)
[Unit]
Description=nginx
After=network.target
[Service]
# forking: the nginx master process daemonizes; systemd tracks it via the PID file
Type=forking
PIDFile=/usr/local/nginx/logs/nginx.pid
ExecStart=/usr/local/nginx/sbin/nginx
# HUP = graceful reload of configuration; QUIT = graceful shutdown
ExecReload=/usr/bin/kill -s HUP $MAINPID
ExecStop=/usr/bin/kill -s QUIT $MAINPID
PrivateTmp=true
[Install]
WantedBy=multi-user.target
chmod 754 /lib/systemd/system/nginx.service
mkdir /var/log/nginx
vim /usr/local/nginx/conf/nginx.conf
# 添加
# L4 (TCP) load balancing for the two kube-apiservers.
# NOTE(review): the 'stream' module requires nginx built with --with-stream — confirm the install script enables it.
stream {
log_format main '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
access_log /var/log/nginx/k8s-access.log main;
# backend pool: both masters' secure apiserver ports
upstream k8s-apiserver {
server 192.168.218.141:6443;
server 192.168.218.142:6443;
}
# listen on the same port the nodes expect (VIP:6443)
server {
listen 6443;
proxy_pass k8s-apiserver;
}
}
nginx -t
systemctl start nginx
systemctl enable nginx
ps -ef | grep nginx
keepalived
# 安装keepalived
yum -y install keepalived
# nginx01的keepalived配置
[root@nginx01 ~]# mv /etc/keepalived/keepalived.conf{,.bak}
# 编写keepalived.conf文件
[root@nginx01 ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
# failover notification recipients (demo placeholder addresses)
notification_email {
acassen@firewall.loc
failover@firewall.loc
sysadmin@firewall.loc
}
# sender address for notification mail
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id NGINX_MASTER
}
# health check: runs every 2s; on failure the script kills keepalived so the VIP fails over
vrrp_script check_nginx {
script "/usr/local/nginx/sbin/check_nginx.sh"
weight -20
interval 2
}
vrrp_instance VI_1 {
state MASTER
interface ens33 # change to match this host's NIC name
virtual_router_id 51
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.218.140/24 # VIP for k8s master (apiserver entry point for the nodes)
}
track_script {
check_nginx
}
}
# nginx02的keepalived配置
[root@nginx02 ~]# mv /etc/keepalived/keepalived.conf{,.bak}
[root@nginx02 ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
# failover notification recipients (demo placeholder addresses)
notification_email {
acassen@firewall.loc
failover@firewall.loc
sysadmin@firewall.loc
}
# sender address for notification mail
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id NGINX_BACKUP # distinct router_id for the backup LB
}
# same health check as on nginx01
vrrp_script check_nginx {
script "/usr/local/nginx/sbin/check_nginx.sh"
weight -20
interval 2
}
vrrp_instance VI_1 {
state BACKUP # this node starts as BACKUP
interface ens33
virtual_router_id 51
priority 90 # lower priority than the MASTER (100)
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.218.140/24 # VIP for k8s master (same as on nginx01)
}
track_script {
check_nginx
}
}
nginx检查脚本
vim /usr/local/nginx/sbin/check_nginx.sh
#!/bin/bash
# keepalived health-check script (invoked every 2s by the vrrp_script block).
# If nginx is no longer running, kill keepalived so the VIP fails over to the
# backup LB node; exit non-zero to signal the check failure.
count=$(netstat -antp | grep -c nginx)
if [ "$count" -eq 0 ]; then
    pkill keepalived
    exit 1
fi
exit 0
启动负载均衡
chmod +x /usr/local/nginx/sbin/check_nginx.sh
systemctl start keepalived
systemctl enable keepalived
# 查看nginx01的IP地址,漂移地址(VIP)此时在nginx01上
[root@nginx01 ~]# ip a
...
inet 192.168.218.140/24 scope global secondary ens33
valid_lft forever preferred_lft forever
...
负载均衡的VIP作为node节点的apiserver的IP地址
# 开始修改node节点配置文件统一VIP(bootstrap.kubeconfig,kubelet.kubeconfig)
# 注意两个node都改
vim /opt/kubernetes/cfg/bootstrap.kubeconfig
vim /opt/kubernetes/cfg/kubelet.kubeconfig
vim /opt/kubernetes/cfg/kube-proxy.kubeconfig
# 三个文件都改:
server: https://192.168.218.140:6443
systemctl restart kubelet
systemctl restart kube-proxy
# 检查
[root@node01 ~]# cd /opt/kubernetes/cfg/
[root@node01 cfg]# grep 140 *
bootstrap.kubeconfig: server: https://192.168.218.140:6443
kubelet.kubeconfig: server: https://192.168.218.140:6443
kube-proxy.kubeconfig: server: https://192.168.218.140:6443
# nginx01上看日志
[root@nginx01 ~]# tail /var/log/nginx/k8s-access.log
192.168.218.151 192.168.218.141:6443 - [16/Mar/2020:22:54:55 +0800] 200 1121
192.168.218.151 192.168.218.142:6443 - [16/Mar/2020:22:54:55 +0800] 200 1121
192.168.218.151 192.168.218.142:6443 - [16/Mar/2020:22:54:55 +0800] 200 1120
# master01上测试
[root@master01 ~]# kubectl run nginx --image=nginx
kubectl run --generator=deployment/apps.v1beta1 is DEPRECATED and will be removed in a future version. Use kubectl create instead.
deployment.apps/nginx created
[root@master01 ~]# kubectl get pod
NAME READY STATUS RESTARTS AGE
nginx-dbddb74b8-g5rfk 0/1 ContainerCreating 0 11s
# 稍等
[root@master01 ~]# kubectl get pod
NAME READY STATUS RESTARTS AGE
nginx-dbddb74b8-g5rfk 1/1 Running 0 86s
# 注意日志问题
[root@master01 ~]# kubectl logs nginx-dbddb74b8-g5rfk
Error from server (Forbidden): Forbidden (user=system:anonymous, verb=get, resource=nodes, subresource=proxy) ( pods/log nginx-dbddb74b8-g5rfk)
[root@master01 ~]# kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=system:anonymous
clusterrolebinding.rbac.authorization.k8s.io/cluster-system-anonymous created
[root@master01 ~]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE
nginx-dbddb74b8-g5rfk 1/1 Running 0 2m39s 172.17.81.2 192.168.218.152 <none>
# 在对应的node上可以访问
[root@node02 cfg]# curl 172.17.81.2
# 返回master01,访问过后就有日志
[root@master01 ~]# kubectl logs nginx-dbddb74b8-g5rfk
172.17.81.1 - - [16/Mar/2020:23:06:42 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.29.0" "-"
大功告成!
更多推荐
已为社区贡献6条内容
所有评论(0)