1 Architecture of the HA k8s deployment

[Architecture diagram: pacemaker keeps a VIP on one of two haproxy load balancers (server5/server6); haproxy forwards apiserver traffic to the three control-plane nodes server2/3/4; server7 is a worker node.]

2 Cleaning up the environment

[root@server2 helm]# kubectl drain server4   drain server4
[root@server2 helm]# kubectl delete nodes server4   delete the server4 node
[root@server4 ~]# kubeadm reset   wipe the kubeadm state on server4
[root@server2 helm]# kubectl drain server3 --ignore-daemonsets   drain server3
[root@server2 helm]# kubectl delete nodes server3   delete server3
node "server3" deleted
[root@server3 ~]# kubeadm reset   wipe the kubeadm state on server3
[root@server2 helm]# kubectl drain server2 --ignore-daemonsets   drain server2 itself
[root@server2 helm]# kubectl delete nodes server2   delete the node
node "server2" deleted
[root@server2 helm]# kubeadm reset   wipe the kubeadm state on server2
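
kubeadm reset does not flush iptables or IPVS rules and leaves the old kubeconfig behind; kubeadm's own output says as much. A typical manual cleanup on each node (a sketch, adjust to what was actually configured):

[root@server2 ~]# iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X   flush iptables rules
[root@server2 ~]# ipvsadm --clear   clear any IPVS rules
[root@server2 ~]# rm -rf $HOME/.kube   remove the stale kubeconfig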

Now add three more virtual machines: server5, server6, and server7.
server5 and server6 provide load balancing and high availability,
server2, server3, and server4 are the k8s masters,
and server7 is a k8s worker node.
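
For reference, a possible /etc/hosts layout for all machines. Only server2 (172.25.50.2), the VIP (172.25.50.100), server5 (172.25.50.5), and the registry (172.25.50.1) appear explicitly later in this walkthrough; the remaining addresses are assumed to follow the same serverN = 172.25.50.N pattern:

172.25.50.1   server1  reg.westos.org
172.25.50.2   server2
172.25.50.3   server3
172.25.50.4   server4
172.25.50.5   server5
172.25.50.6   server6
172.25.50.7   server7
# 172.25.50.100 is the VIP managed by pacemaker; it needs no hosts entry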

3 Deploying and configuring haproxy load balancing

[root@server5 ~]# yum install -y haproxy   install haproxy
[root@server6 ~]# yum install -y haproxy   install haproxy
[root@server5 ~]# cd /etc/haproxy/
[root@server5 haproxy]# vim haproxy.cfg   edit the configuration

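The haproxy.cfg screenshots are not reproduced here. A minimal sketch of the sections that matter, assuming the master addresses follow the serverN = 172.25.50.N pattern; mode tcp is required so the apiserver's TLS traffic passes through untouched:

listen stats
    bind *:80
    mode http
    stats enable
    stats uri /admin          # the stats page visited below

frontend k8s-apiserver
    bind *:6443
    mode tcp
    default_backend k8s-masters

backend k8s-masters
    mode tcp
    balance roundrobin
    server server2 172.25.50.2:6443 check
    server server3 172.25.50.3:6443 check
    server server4 172.25.50.4:6443 check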

[root@server5 haproxy]# systemctl restart haproxy.service   restart haproxy
[root@server5 haproxy]# netstat -antlp   confirm haproxy is listening

Visit 172.25.50.5/admin to see the haproxy stats page.

4 Installing and configuring pacemaker for high availability

[root@server5 haproxy]# scp haproxy.cfg server6:/etc/haproxy/   copy haproxy.cfg from server5 to server6
[root@server5 haproxy]# cd /etc/yum.repos.d/
[root@server5 yum.repos.d]# vim dvd.repo   configure the software repositories
[dvd]
name=rhel7.6
baseurl=http://172.25.254.50/rhel7.6
gpgcheck=0

[HighAvailability]   add the HighAvailability add-on repo
name=HighAvailability
baseurl=http://172.25.254.50/rhel7.6/addons/HighAvailability
gpgcheck=0
[root@server5 ~]# ssh-keygen 
[root@server5 ~]# ssh-copy-id server6   set up passwordless ssh to server6
[root@server5 ~]# yum install -y pacemaker pcs psmisc policycoreutils-python   install the HA cluster suite
[root@server5 yum.repos.d]# scp dvd.repo server6:/etc/yum.repos.d/   copy the repo file from server5 to server6
[root@server5 yum.repos.d]# ssh server6 yum install -y pacemaker pcs psmisc policycoreutils-python   install the HA cluster suite on server6
[root@server5 yum.repos.d]# systemctl enable --now pcsd.service   enable and start the service
[root@server5 yum.repos.d]# ssh server6 systemctl enable --now pcsd.service   enable and start the service on server6
[root@server5 yum.repos.d]# echo westos | passwd --stdin hacluster   set a password for the HA account
[root@server5 yum.repos.d]# ssh server6 'echo westos | passwd --stdin hacluster'   set the same password on server6
[root@server5 ~]# pcs cluster auth server5 server6   authenticate the cluster nodes
Username: hacluster
Password:
server5: Authorized
server6: Authorized
[root@server5 ~]# pcs cluster setup --name mycluster server5 server6   create a cluster named mycluster with nodes server5 and server6
[root@server5 ~]# pcs cluster start --all   start the cluster services on all nodes
[root@server5 ~]# pcs status   check the cluster status

[pcs status output: a WARNING that no STONITH devices are configured]
The warning appears because no fence device has been configured, so STONITH can be turned off for now:

[root@server5 ~]# pcs property set stonith-enabled=false   disable STONITH
[root@server5 ~]# systemctl stop haproxy   stop haproxy first; pacemaker will manage it from now on
[root@server5 ~]# systemctl disable haproxy
[root@server5 ~]# pcs resource create vip ocf:heartbeat:IPaddr2 ip=172.25.50.100 cidr_netmask=32 op monitor interval=30s   create the vip resource;
op monitor interval=30s tells pacemaker to check the resource every 30s
[root@server5 ~]# ip addr   verify that the VIP has been added

[ip addr output: the VIP 172.25.50.100 is bound on server5]

Test:
[root@server5 ~]# pcs node standby   put server5 in standby (take it offline)
[root@server5 ~]# pcs status   check the cluster status

[pcs status output: the resources have failed over to server6]

[root@server5 ~]# pcs node unstandby   bring server5 back online
[root@server5 ~]# pcs status

[pcs status output: server5 is online again]

[root@server5 ~]# pcs resource standards   list the resource agent standards available on this host
lsb
ocf
service
systemd
[root@server5 ~]# pcs resource create haproxy systemd:haproxy op monitor interval=60s   create a haproxy resource driven by the systemd haproxy unit;
op monitor interval=60s monitors it every 60s
[root@server5 ~]# pcs status   check the cluster status

[pcs status output: vip and haproxy are running on different nodes]
A constraint is needed so that the two resources always run on the same node:

[root@server5 ~]# pcs resource group add hagroup vip haproxy   add a group resource; the resources listed after the group name are bound to the same node
Note the order: resources start in the order they are listed (an equivalent pair of explicit constraints is sketched below).
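For reference, a resource group is shorthand for a colocation constraint plus an ordering constraint; a sketch of the equivalent explicit form:

[root@server5 ~]# pcs constraint colocation add haproxy with vip   run haproxy on whichever node holds the VIP
[root@server5 ~]# pcs constraint order start vip then start haproxy   bring the VIP up before starting haproxy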
[root@server5 ~]# pcs status   check the cluster status; all resources are now on server6

[pcs status output: vip and haproxy both running on server6]

5 Deploying the k8s cluster

Docker and k8s are already installed on server2, server3, and server4 from the earlier lessons.
[root@server2 ~]# vim /etc/docker/daemon.json     ## server3 and server4 need the same configuration
{
  "registry-mirrors": ["https://reg.westos.org"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
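
After editing daemon.json, docker must be restarted for the new cgroup driver to take effect; kubelet and docker have to agree on the systemd driver. A quick check:

[root@server2 ~]# systemctl restart docker
[root@server2 ~]# docker info | grep -i cgroup   should report Cgroup Driver: systemd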
[root@server2 ~]# cat /etc/sysctl.d/docker.conf    server3 and server4 need the same configuration
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
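
The sysctl settings take effect after a reload, the same way it is done for server7 later:

[root@server2 ~]# sysctl --system   apply the new kernel parameters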

[root@server2 ~]# source <(kubectl completion bash) && echo 'source <(kubectl completion bash)' >> ~/.bashrc   enable kubectl tab completion (re-login for it to take effect); server3 and server4 need the same configuration

# Load the kernel modules kube-proxy needs for IPVS mode:
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
[root@server2 ~]# yum install ipvsadm -y   server3 and server4 need the same package
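
To have these modules loaded now and at every boot, one option (a sketch; the drop-in file name is arbitrary) is a systemd modules-load.d drop-in:

[root@server2 ~]# cat > /etc/modules-load.d/ipvs.conf << EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF
[root@server2 ~]# systemctl restart systemd-modules-load.service   load the listed modules immediately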

[root@server2 ~]# kubeadm config print init-defaults > kubeadm-init.yaml   dump the default kubeadm configuration to a file
[root@server2 ~]# vim kubeadm-init.yaml 
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.25.50.2   # this host's IP address
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  imagePullPolicy: IfNotPresent
  name: server2   # set the host name
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "172.25.50.100:6443"   # the VIP, with the 6443 port configured in haproxy
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: reg.westos.org/k8s   # pull from the private registry
kind: ClusterConfiguration
kubernetesVersion: 1.23.0
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16   # pod subnet (only needed with the flannel network add-on)
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs   # run kube-proxy in IPVS mode
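Before pulling, the image list that this config resolves to can be double-checked:

[root@server2 ~]# kubeadm config images list --config kubeadm-init.yaml   print the images the config will use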
[root@server2 ~]# kubeadm config images pull --config kubeadm-init.yaml   pre-pull the images; they were pushed to the private registry earlier, so the pull is fast
[config/images] Pulled reg.westos.org/k8s/kube-apiserver:v1.23.6
[config/images] Pulled reg.westos.org/k8s/kube-controller-manager:v1.23.6
[config/images] Pulled reg.westos.org/k8s/kube-scheduler:v1.23.6
[config/images] Pulled reg.westos.org/k8s/kube-proxy:v1.23.6
[config/images] Pulled reg.westos.org/k8s/pause:3.6
[config/images] Pulled reg.westos.org/k8s/etcd:3.5.1-0
[config/images] Pulled reg.westos.org/k8s/coredns:v1.8.6
[root@server2 ~]# kubeadm init --config kubeadm-init.yaml --upload-certs   initialize; with multiple master nodes, --upload-certs uploads the certificates so the other control-plane nodes can join

[kubeadm init output: the control-plane and worker join commands, including the --certificate-key]
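The init output also explains how to point kubectl at the new cluster; on server2 the quickest way (matching what is done on server3 and server4 below) is:

[root@server2 ~]# export KUBECONFIG=/etc/kubernetes/admin.conf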
Add the flannel network plugin:

[root@server2 ~]# wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml   download the flannel manifest
[root@server2 ~]# vim kube-flannel.yml 

[kube-flannel.yml edits: point the image at the private registry and set the pod network]
Note: the flannel images were already pushed to the private registry, so they are pulled from there directly.
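
A sketch of the two usual edits in kube-flannel.yml; the exact image name/tag in the private registry is an assumption here, and the Network value must match the podSubnet in kubeadm-init.yaml:

  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
...
      containers:
      - name: kube-flannel
        image: reg.westos.org/flannel:v0.17.0   # hypothetical tag; use whatever was pushed to the registry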

[root@server2 ~]# kubectl apply -f kube-flannel.yml   create the flannel resources
[root@server3 ~]# kubeadm join 172.25.50.100:6443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:67a2a973878f0cc293e24f154512189bba0b479e85f8c677d700531f96771198 --control-plane --certificate-key 11fb6d675bd00945a9475fdfc8650b861cf5070306a98fe970269fcb8a058b46   join server3 as an additional master
[root@server3 ~]# export KUBECONFIG=/etc/kubernetes/admin.conf   point kubectl at the admin kubeconfig
[root@server4 ~]# kubeadm join 172.25.50.100:6443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:67a2a973878f0cc293e24f154512189bba0b479e85f8c677d700531f96771198 --control-plane --certificate-key 11fb6d675bd00945a9475fdfc8650b861cf5070306a98fe970269fcb8a058b46   join server4 as an additional master
[root@server4 ~]# export KUBECONFIG=/etc/kubernetes/admin.conf   point kubectl at the admin kubeconfig
[root@server4 ~]# kubectl get nodes   list the nodes; all three masters have joined
NAME      STATUS   ROLES                  AGE     VERSION
server2   Ready    control-plane,master   71m     v1.23.6
server3   Ready    control-plane,master   11m     v1.23.6
server4   Ready    control-plane,master   3m25s   v1.23.5

Deploy the k8s worker node on server7:

[root@server3 yum.repos.d]# scp docker.repo k8s.repo server7:/etc/yum.repos.d/   copy the docker and k8s repos from server3 to server7
[root@server7 ~]# yum install docker-ce -y   install docker
[root@server7 ~]# systemctl enable --now docker   enable and start docker
[root@server3 yum.repos.d]# cd /etc/docker/
[root@server3 docker]# scp daemon.json server7:/etc/docker/   copy the private-registry daemon.json from server3 to server7
[root@server7 ~]# systemctl restart docker   restart docker
[root@server3 sysctl.d]# scp docker.conf server7:/etc/sysctl.d/   copy the bridge sysctl settings
[root@server7 ~]# sysctl --system   apply them
[root@server3 docker]# scp -r certs.d/ server7:/etc/docker/   copy the registry certificates from server3 to server7
[root@server7 ~]# vim /etc/hosts
172.25.50.1  server1  reg.westos.org   add name resolution for the registry
Load the IPVS modules:
[root@server7 ~]# yum install ipvsadm -y   install ipvsadm
[root@server7 ~]# lsmod | grep ip_vs   the ip_vs modules are already loaded
ip_vs                 145497  0  
[root@server7 ~]# swapoff -a   turn swap off
[root@server7 ~]# vim /etc/fstab   comment out the swap entry so it stays off after reboot
#/dev/mapper/rhel-swap   swap                    swap    defaults        0 0
[root@server7 ~]# yum install -y kubeadm kubelet   install kubeadm and kubelet
[root@server7 ~]# systemctl enable --now kubelet   enable kubelet at boot
[root@server7 ~]# kubeadm join 172.25.50.100:6443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:67a2a973878f0cc293e24f154512189bba0b479e85f8c677d700531f96771198   join server7 as a worker node
[root@server2 ~]# kubectl get node   list the nodes
NAME      STATUS   ROLES                  AGE     VERSION
server2   Ready    control-plane,master   3h15m   v1.23.6
server3   Ready    control-plane,master   135m    v1.23.6
server4   Ready    control-plane,master   127m    v1.23.5
server7   Ready    <none>                 75s     v1.23.6

Test:
[root@server2 ~]# kubectl run demo --image=nginx   run a test pod
pod/demo created
[root@server2 ~]# kubectl get pod -o wide   
NAME   READY   STATUS    RESTARTS   AGE   IP           NODE      NOMINATED NODE   READINESS GATES
demo   1/1     Running   0          30m   10.244.3.2   server7   <none>           <none>      scheduled on server7
[root@server2 ~]# poweroff   take server2 down
[root@server3 docker]# kubectl get pod   from server3 the pod is still visible
NAME   READY   STATUS    RESTARTS   AGE
demo   1/1     Running   0          94m
[root@server3 docker]# poweroff   take server3 down as well
[root@server4 ~]# kubectl get pod   now unreachable: with two of the three masters down, etcd loses quorum, so a single surviving node is no longer highly available
Unable to connect to the server: net/http: TLS handshake timeout
[root@foundation50 isos]# virsh start vm2   restore server2; the node rejoins the cluster automatically
Domain vm2 started 
[root@foundation50 isos]# virsh start vm3   restore server3; it rejoins automatically
Domain vm3 started
[root@server3 ~]#  export KUBECONFIG=/etc/kubernetes/admin.conf
[root@server3 ~]# kubectl get node   list the nodes; all of them have rejoined the cluster
NAME      STATUS   ROLES                  AGE     VERSION
server2   Ready    control-plane,master   5h45m   v1.23.6
server3   Ready    control-plane,master   4h45m   v1.23.6
server4   Ready    control-plane,master   4h36m   v1.23.5
server7   Ready    <none>                 150m    v1.23.6
[root@server2 ~]# kubectl cluster-info   view cluster information
Kubernetes control plane is running at https://172.25.50.100:6443   the control plane answers on the VIP
CoreDNS is running at https://172.25.50.100:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
[root@server2 ~]# kubectl get pod   pods are queryable again
NAME   READY   STATUS    RESTARTS   AGE
demo   1/1     Running   0          166m
[root@server2 ~]# kubectl delete pod demo   clean up the test pod
pod "demo" deleted