Deploying a highly available Kubernetes 1.18.14 cluster with kubeadm (for other versions, just substitute the version number 1.18.14)
Cluster roles

Role | IP address | Hostname |
---|---|---|
master1 | 192.168.26.100 | k8s-01 |
master2/node | 192.168.26.120 | k8s-02 |
master3/node | 192.168.26.130 | k8s-03 |
node | 192.168.26.170 | k8s-04 |
vip | 192.168.26.150 | |
Perform the following on the master at 192.168.26.100. We'll write a script here: master1 is used to set up passwordless SSH and to quickly update the hosts file and hostnames on all nodes.
##First, append the hosts entries
cat >> /etc/hosts <<EOF
192.168.26.100 k8s-01
192.168.26.120 k8s-02
192.168.26.130 k8s-03
192.168.26.170 k8s-04
EOF
[root@localhost ~]# yum -y install expect ##install on all 4 machines
[root@localhost ~]# ssh-keygen ##press Enter through all the prompts to generate the key pair
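To skip the interactive prompts, a non-interactive variant also works (a sketch, assuming an RSA key with no passphrase is acceptable):
ssh-keygen -t rsa -N "" -f /root/.ssh/id_rsa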
vim ssh-hosts.sh
#!/bin/bash
##Change the IP addresses and hostnames below to your own
##656768 is the root password; change it to your own as well
##The list covers both IPs and hostnames so the host keys for both forms get accepted
for i in 192.168.26.100 192.168.26.120 192.168.26.130 192.168.26.170 k8s-02 k8s-03 k8s-04;do
expect -c "
spawn ssh-copy-id -i /root/.ssh/id_rsa.pub root@$i
expect {
\"*yes/no*\" {send \"yes\r\"; exp_continue}
\"*password*\" {send \"656768\r\"; exp_continue}
\"*Password*\" {send \"656768\r\";}
} "
done
for host_name in k8s-02 k8s-03 k8s-04;do
scp /etc/hosts $host_name:/etc/
done
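Then run the script (assuming it was saved as ssh-hosts.sh in the current directory):
bash ssh-hosts.sh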
The script above has already synced the hosts file to every node; next, set each node's hostname.
hostnamectl set-hostname k8s-01 ##set this machine's hostname first
ssh k8s-02 ##second machine
hostnamectl set-hostname k8s-02
exit
ssh k8s-03 ##third machine
hostnamectl set-hostname k8s-03
exit
ssh k8s-04 ##fourth machine
hostnamectl set-hostname k8s-04
exit
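The same can be done in one loop from master1 (a sketch; assumes the passwordless SSH set up above is working):
hostnamectl set-hostname k8s-01
for h in k8s-02 k8s-03 k8s-04; do ssh $h "hostnamectl set-hostname $h"; done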
Back on the .100 node, ping each host to check that nothing is wrong:
[root@k8s-01 ~]# ping k8s-01
PING k8s-01 (192.168.26.100) 56(84) bytes of data.
64 bytes from k8s-01 (192.168.26.100): icmp_seq=1 ttl=64 time=0.020 ms
[root@k8s-01 ~]# ping k8s-02
PING k8s-02 (192.168.26.120) 56(84) bytes of data.
64 bytes from k8s-02 (192.168.26.120): icmp_seq=1 ttl=64 time=0.252 ms
[root@k8s-01 ~]# ping k8s-03
PING k8s-03 (192.168.26.130) 56(84) bytes of data.
64 bytes from k8s-03 (192.168.26.130): icmp_seq=1 ttl=64 time=0.234 ms
[root@k8s-01 ~]# ping k8s-04
PING k8s-04 (192.168.26.170) 56(84) bytes of data.
64 bytes from k8s-04 (192.168.26.170): icmp_seq=1 ttl=64 time=0.151 ms
OK, the initial environment is ready; now configure the system on each node.
1. Configure the system environment (run on all machines)
The following steps must be run on every machine.
##You can type each step by hand, or put everything in one script, scp it to each machine, and deploy automatically (see the sketch after the script)
vim k8s_init.sh
#!/bin/bash
##Install prerequisite packages
yum install -y epel-release conntrack ipvsadm ipset jq sysstat curl iptables libseccomp
##Stop and disable firewalld
systemctl stop firewalld && systemctl disable firewalld
##Flush iptables rules and default-accept forwarding
iptables -F && iptables -X && iptables -F -t nat && iptables -X -t nat && iptables -P FORWARD ACCEPT
##Disable SELinux
setenforce 0
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
##Disable swap
swapoff -a
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
##Load the IPVS and bridge netfilter kernel modules
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
modprobe -- br_netfilter
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules
##Kernel parameters required by Kubernetes
cat << EOF | tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
sysctl -p /etc/sysctl.d/k8s.conf
##Install Docker (remove any old versions first)
yum remove -y docker \
docker-client \
docker-client-latest \
docker-common \
docker-latest \
docker-latest-logrotate \
docker-logrotate \
docker-selinux \
docker-engine-selinux \
docker-engine
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum makecache fast
yum list docker-ce --showduplicates | sort -r
yum -y install docker-ce-18.09.9-3.el7
systemctl start docker
systemctl enable docker
##Keep the FORWARD chain open after Docker starts (note: this assumes line 13 of docker.service falls inside the [Service] section; verify for your Docker version)
sed -i "13i ExecStartPost=/usr/sbin/iptables -P FORWARD ACCEPT" /usr/lib/systemd/system/docker.service
##Configure the registry mirror and daemon options
tee /etc/docker/daemon.json <<-'EOF'
{
"registry-mirrors": ["https://bk6kzfqm.mirror.aliyuncs.com"],
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
},
"storage-driver": "overlay2",
"storage-opts": [
"overlay2.override_kernel_check=true"
]
}
EOF
##Restart Docker to pick up daemon.json
systemctl daemon-reload
systemctl restart docker
#Install the Kubernetes components
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
#Rebuild the yum cache; enter y if prompted to accept the repo keys
yum makecache fast
yum install -y kubelet-1.18.14 kubectl-1.18.14 kubeadm-1.18.14
##Configure the kubelet to use systemd as its cgroup driver, matching the Docker setting above
echo "KUBELET_EXTRA_ARGS=--cgroup-driver=systemd" > /etc/sysconfig/kubelet
systemctl daemon-reload
systemctl restart kubelet
systemctl enable --now kubelet
systemctl status kubelet ##kubelet will restart in a loop until kubeadm init/join runs; this is expected
#Install the bash-completion package
yum install bash-completion -y
#Set up kubectl and kubeadm command completion; takes effect at next login
kubectl completion bash > /etc/bash_completion.d/kubectl
kubeadm completion bash > /etc/bash_completion.d/kubeadm
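A minimal distribution sketch, run from k8s-01 (assumes the passwordless SSH set up earlier and that the script is saved as k8s_init.sh):
bash k8s_init.sh ##run it on k8s-01 itself first
for h in k8s-02 k8s-03 k8s-04; do
scp k8s_init.sh $h:/root/
ssh $h "bash /root/k8s_init.sh"
done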
2. Configure high availability on the master nodes (run on all 3 masters)
1. Install and configure keepalived and haproxy
yum install keepalived haproxy -y
##Edit the configuration files
##On k8s-01:
vim /etc/keepalived/keepalived.conf
global_defs {
    router_id LVS_DEVEL_01   # must be unique on each node
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"   # health-check script
    interval 8
    weight -5
    fall 3
    rise 2
}
vrrp_instance VI_1 {
    state MASTER             # primary node
    interface eth0           # NIC name
    mcast_src_ip 192.168.26.100   # this machine's IP
    virtual_router_id 51     # must be identical on all masters
    priority 150             # the primary's priority should be higher than the backups'
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.26.150/24    # the virtual IP
    }
    track_script {
        chk_apiserver
    }
}
k8s-02 and k8s-03 are configured similarly; on k8s-02:
#vi /etc/keepalived/keepalived.conf
global_defs {
    router_id LVS_DEVEL_02
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 8
    weight -5
    fall 3
    rise 2
}
vrrp_instance VI_1 {
    state BACKUP             # backup node
    interface eth0
    mcast_src_ip 192.168.26.120
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.26.150/24
    }
    track_script {
        chk_apiserver
    }
}
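For k8s-03, use the same BACKUP template; only these fields differ (the priority of 50 is an assumption; any value below the other two nodes works):
    router_id LVS_DEVEL_03
    mcast_src_ip 192.168.26.130
    priority 50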
2. Create the health-check script on all 3 masters
#vi /etc/keepalived/check_apiserver.sh
#!/bin/bash
##Stop keepalived (releasing the VIP) if kube-apiserver is down for 5 consecutive checks
err=0
for k in $(seq 1 5)
do
check_code=$(pgrep kube-apiserver)
if [[ $check_code == "" ]]; then
err=$(expr $err + 1)
sleep 5
continue
else
err=0
break
fi
done
if [[ $err != "0" ]]; then
echo "systemctl stop keepalived"
/usr/bin/systemctl stop keepalived
exit 1
else
exit 0
fi
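keepalived can only run the script if it is executable; a sketch run from k8s-01 to set the bit and copy it to the other masters (scp preserves the file mode):
chmod +x /etc/keepalived/check_apiserver.sh
for h in k8s-02 k8s-03; do scp /etc/keepalived/check_apiserver.sh $h:/etc/keepalived/; done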
3. Configure haproxy.cfg (on all 3 masters)
cat > /etc/haproxy/haproxy.cfg << EOF
#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    # to have these messages end up in /var/log/haproxy.log you will
    # need to:
    # 1) configure syslog to accept network log events. This is done
    #    by adding the '-r' option to the SYSLOGD_OPTIONS in
    #    /etc/sysconfig/syslog
    # 2) configure local2 events to go to the /var/log/haproxy.log
    #    file. A line like the following can be added to
    #    /etc/sysconfig/syslog
    #
    #    local2.* /var/log/haproxy.log
    #
    log         127.0.0.1 local2
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon
    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats
#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option                  http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000
#---------------------------------------------------------------------
# kubernetes apiserver frontend which proxies to the backends
#---------------------------------------------------------------------
frontend kubernetes-apiserver
    mode            tcp
    bind            *:16443
    option          tcplog
    default_backend kubernetes-apiserver
#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend kubernetes-apiserver
    mode    tcp
    balance roundrobin
    server  k8s-01 192.168.26.100:6443 check
    server  k8s-02 192.168.26.120:6443 check
    server  k8s-03 192.168.26.130:6443 check
#---------------------------------------------------------------------
# collection haproxy statistics message
#---------------------------------------------------------------------
listen stats
    bind          *:1080
    stats auth    admin:awesomePassword
    stats refresh 5s
    stats realm   HAProxy\ Statistics
    stats uri     /admin?stats
EOF
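Before starting haproxy, the file can be validated (haproxy's -c flag checks the configuration without starting the proxy):
haproxy -c -f /etc/haproxy/haproxy.cfg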
4. Start and verify both services
systemctl enable keepalived.service
systemctl start keepalived.service
systemctl status keepalived.service
ip a s eth0
[root@k8s-01 ~]# ip a s eth0
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:bf:bf:a5 brd ff:ff:ff:ff:ff:ff
inet 192.168.26.100/24 brd 192.168.26.255 scope global noprefixroute eth0
valid_lft forever preferred_lft forever
inet 192.168.26.150/24 scope global secondary eth0
valid_lft forever preferred_lft forever
systemctl enable haproxy
systemctl start haproxy
systemctl status haproxy
ss -anput |grep haproxy
[root@k8s-01 ~]# ss -anput |grep haproxy
udp UNCONN 0 0 *:56573 *:* users:(("haproxy",pid=69064,fd=6),("haproxy",pid=69063,fd=6))
tcp LISTEN 0 128 *:1080 *:* users:(("haproxy",pid=69064,fd=7))
tcp LISTEN 0 128 *:16443 *:* users:(("haproxy",pid=69064,fd=5))
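You can also hit the stats page defined in haproxy.cfg to confirm it is serving (the credentials come from the stats auth line):
curl -u admin:awesomePassword http://127.0.0.1:1080/admin?stats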
3. Initialize the cluster (on k8s-01)
kubeadm init \
--control-plane-endpoint "192.168.26.150:16443" \
--kubernetes-version "1.18.14" \
--pod-network-cidr "10.0.0.0/8" \
--service-cidr "172.16.0.0/16" \
--token "abcdef.0123456789abcdef" \
--token-ttl "0" \
--image-repository registry.aliyuncs.com/google_containers \
--upload-certs
##The command above may pick up formatting problems when copied; use the one-line version below for pasting
kubeadm init --control-plane-endpoint "192.168.26.150:16443" --kubernetes-version "1.18.14" --pod-network-cidr "10.0.0.0/8" --service-cidr "172.16.0.0/16" --token "abcdef.0123456789abcdef" --token-ttl "0" --image-repository registry.aliyuncs.com/google_containers --upload-certs
##kubeadm init pulls the control-plane images; the wait time depends on your network, so be patient
Parameter explanations:
--control-plane-endpoint: a fixed virtual IP address (and port) for the control plane. It should match the load-balancer VIP; if the load balancer runs on the same host as a master, use a port other than 6443.
--kubernetes-version: the Kubernetes version to install.
--pod-network-cidr: the IP range for the pod network.
--service-cidr: the IP range for service VIPs.
--token: the token used to establish mutual trust between the control plane and joining nodes.
--token-ttl: the token expiry time; "0" means it never expires.
--image-repository: the registry to pull control-plane images from.
--upload-certs: upload the control-plane certificates to the kubeadm-certs Secret.
##The following output indicates success
##If it fails, check whether the VIP came up successfully
##Run kubeadm reset, then re-run the init
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
##This part configures kubectl to manage the cluster (any machine with kubectl installed can be set up this way)
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config ##if admin.conf is missing, scp a copy from a master
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of the control-plane node running the following command on each as root:
##This command joins additional master nodes
kubeadm join 192.168.26.150:16443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:e7fd94ec50ce57faea088f3358808f7155ffab364cac8ee2a4c8e66a8f7b6f8f \
--control-plane --certificate-key 0b6faa9d215fd51d97587ece45480abe58b88607c8cb776fc85d0edd354c8dab
Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.
Then you can join any number of worker nodes by running the following on each as root:
##This command joins worker nodes
kubeadm join 192.168.26.150:16443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:e7fd94ec50ce57faea088f3358808f7155ffab364cac8ee2a4c8e66a8f7b6f8f
##Configure kubectl access (run this now on k8s-01)
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
4. Join the other nodes to the cluster
On k8s-02:
kubeadm join 192.168.26.150:16443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:e7fd94ec50ce57faea088f3358808f7155ffab364cac8ee2a4c8e66a8f7b6f8f \
--control-plane --certificate-key 0b6faa9d215fd51d97587ece45480abe58b88607c8cb776fc85d0edd354c8dab
##Configure kubectl access
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
On k8s-03:
kubeadm join 192.168.26.150:16443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:e7fd94ec50ce57faea088f3358808f7155ffab364cac8ee2a4c8e66a8f7b6f8f \
--control-plane --certificate-key 0b6faa9d215fd51d97587ece45480abe58b88607c8cb776fc85d0edd354c8dab
##Configure kubectl access
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
On k8s-04 (worker node):
kubeadm join 192.168.26.150:16443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:e7fd94ec50ce57faea088f3358808f7155ffab364cac8ee2a4c8e66a8f7b6f8f
If you need to manage the cluster from a worker node, configure kubectl the same way as above; I'll skip that here.
Check the cluster status: NotReady is normal at this point, because the network has not been configured yet.
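If you join nodes later and the join command is gone (or the certificate key has expired after two hours), it can be regenerated on any master; a sketch:
kubeadm token create --print-join-command ##prints a fresh worker join command
kubeadm init phase upload-certs --upload-certs ##prints a new certificate key for joining masters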
5. Configure the calico network (on k8s-01)
wget https://docs.projectcalico.org/v3.15/manifests/calico.yaml
vim calico.yaml
## Search for the IP address beginning with 192 (around line 3580); set CALICO_IPV4POOL_CIDR to the pod CIDR used at init (10.0.0.0/8), remove the comment markers, then save and exit
After the change, the value should match the pod CIDR from the kubeadm init above.
kubectl apply -f calico.yaml ##wait for the calico pods to start; the images need to be downloaded first
kubectl get nodes ##check the cluster network status
$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-01 Ready master 19m v1.18.14
k8s-02 Ready master 14m v1.18.14
k8s-03 Ready master 14m v1.18.14
k8s-04 Ready <none> 6m30s v1.18.14
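To watch the calico pods come up before checking the nodes, something like this should work (the k8s-app=calico-node label is what the calico manifest uses; adjust if your copy differs):
kubectl get pods -n kube-system -l k8s-app=calico-node -w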
6. Adjust labels and remove taints so the master nodes can run pods
kubectl taint nodes <node-name> node-role.kubernetes.io/master- ##remove the master taint (a concrete sketch for this cluster follows the output below)
kubectl label nodes <node-name> node-role.kubernetes.io/node= ##add the node role label
kubectl get nodes ##check the cluster status
NAME STATUS ROLES AGE VERSION
k8s-01 Ready master 19m v1.18.14
k8s-02 Ready master,node 14m v1.18.14
k8s-03 Ready master,node 14m v1.18.14
k8s-04 Ready node 6m30s v1.18.14
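A concrete sketch matching the output above (k8s-02 and k8s-03 become schedulable and get the node role; k8s-04 only needs the label, since it carries no master taint):
for n in k8s-02 k8s-03; do
kubectl taint nodes $n node-role.kubernetes.io/master-
kubectl label nodes $n node-role.kubernetes.io/node=
done
kubectl label nodes k8s-04 node-role.kubernetes.io/node=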