I. Building k8s high availability with pacemaker (server5 and server6)

1. Configuration and deployment

[root@server5 ~]# yum install -y pacemaker pcs psmisc policycoreutils-python
[root@server5 ~]# systemctl enable --now pcsd.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/pcsd.service to /usr/lib/systemd/system/pcsd.service.
[root@server5 ~]# passwd hacluster
Changing password for user hacluster.
New password: 
BAD PASSWORD: The password is shorter than 8 characters
Retype new password: 
passwd: all authentication tokens updated successfully.
## After running the steps above, repeat them on server6; once both nodes are done, continue with the steps below.

[root@server5 ~]# pcs cluster auth server5 server6
Username: hacluster
Password: 
server5: Authorized
server6: Authorized
[root@server5 ~]# pcs cluster setup --name mycluster server5 server6
Destroying cluster on nodes: server5, server6...
server5: Stopping Cluster (pacemaker)...
server6: Stopping Cluster (pacemaker)...
server5: Successfully destroyed cluster
server6: Successfully destroyed cluster

Sending 'pacemaker_remote authkey' to 'server5', 'server6'
server5: successful distribution of the file 'pacemaker_remote authkey'
server6: successful distribution of the file 'pacemaker_remote authkey'
Sending cluster config files to the nodes...
server5: Succeeded
server6: Succeeded

Synchronizing pcsd certificates on nodes server5, server6...
server5: Success
server6: Success
Restarting pcsd on the nodes in order to reload the certificates...
server5: Success
server6: Success
[root@server5 ~]# pcs cluster enable --all
server5: Cluster Enabled
server6: Cluster Enabled
[root@server5 ~]# pcs cluster start --all
server5: Starting Cluster (corosync)...
server6: Starting Cluster (corosync)...
server5: Starting Cluster (pacemaker)...
server6: Starting Cluster (pacemaker)...
[root@server5 ~]# pcs property set stonith-enabled=false
[root@server5 ~]# crm_verify -L -V
[root@server5 ~]# pcs status 
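stonith is disabled here because this lab has no fence devices, and crm_verify -L -V exiting silently confirms the configuration is valid. The property can be double-checked afterwards (a sketch using the pcs property command):

[root@server5 ~]# pcs property list | grep stonith
 stonith-enabled: false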

[root@server5 ~]# pcs resource create vip ocf:heartbeat:IPaddr2 ip=172.25.15.100 op monitor interval=30s	# add the VIP, monitored every 30s
[root@server5 ~]# pcs status

[root@server5 ~]# ip addr
[root@server5 ~]# ping 172.25.15.100

[root@server5 ~]# yum install -y haproxy.x86_64
[root@server5 ~]# ls
[root@server5 ~]# cd /etc/haproxy/
[root@server5 haproxy]# ls
haproxy.cfg
[root@server5 haproxy]# vim haproxy.cfg 
listen stats *:80
    stats uri /status
#---------------------------------------------------------------------
# main frontend which proxys to the backends
#---------------------------------------------------------------------
frontend  main *:6443
    mode tcp
    default_backend             app

#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend app
    balance     roundrobin
    mode tcp
    server  k8s1 172.25.15.7:6443 check
    server  k8s2 172.25.15.8:6443 check
    server  k8s3 172.25.15.9:6443 check

[root@server5 haproxy]# systemctl restart haproxy.service 
[root@server5 haproxy]# netstat -antlp |grep :6443
tcp        0      0 0.0.0.0:6443            0.0.0.0:*               LISTEN      6309/haproxy        
tcp        0      1 172.25.15.5:51052       172.25.15.8:6443        SYN_SENT    6309/haproxy        
tcp        0      1 172.25.15.5:59988       172.25.15.7:6443        SYN_SENT    6309/haproxy        
[root@server5 haproxy]# netstat -antlp |grep :80
tcp        0      0 0.0.0.0:80              0.0.0.0:*               LISTEN      6309/haproxy        
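Before switching to a browser, the stats endpoint can also be probed from the shell (a sketch; an HTTP 200 means haproxy is serving the stats page configured above):

[root@server5 haproxy]# curl -s -o /dev/null -w '%{http_code}\n' http://172.25.15.5/status
200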
## Copy the finished haproxy configuration from server5 over to server6
[root@server6 ~]# yum install -y haproxy.x86_64 
[root@server5 haproxy]# scp haproxy.cfg server6:/etc/haproxy/
root@server6's password: 
haproxy.cfg                   100% 2686     2.9MB/s   00:00    
[root@server5 haproxy]# pwd
/etc/haproxy
[root@server5 haproxy]# 


2. Access from a browser

http://172.25.15.5/status


3. High availability

[root@server5 haproxy]# pcs resource create haproxy systemd:haproxy op monitor interval=60s	# check every 60s for a dead node
[root@server5 haproxy]# pcs resource group add hagroup vip haproxy	# group vip and haproxy so they always fail over together
[root@server5 haproxy]# pcs status


## Only one node serves at a time; when it dies, the other takes over (see the failover sketch below)
http://172.25.15.5/status
http://172.25.15.6/status
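A failover can also be forced by hand to verify the takeover (a sketch; older pcs versions use pcs cluster standby instead of pcs node standby):

[root@server5 haproxy]# pcs node standby server5	# evacuate resources from server5
[root@server5 haproxy]# pcs status	# hagroup (vip + haproxy) should now run on server6
[root@server5 haproxy]# pcs node unstandby server5	# bring server5 back into the cluster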


II. Deploying the k8s cluster

Perform the same steps on all three nodes: server7, server8, and server9.

1. Install docker

[root@server7 ~]# vim /etc/yum.repos.d/westos.repo 
[root@server7 ~]# cat /etc/yum.repos.d/westos.repo
[wan]
name="wan"
baseurl=http://172.25.15.250/rhel7
gpgcheck=0
[docker]
name="docker"
baseurl=ftp://172.25.15.250/pub/docs/docker/docker-ce
gpgcheck=0
[root@server7 ~]# yum install -y docker-ce	


2. Configure name resolution

[root@server7 ~]# vim /etc/hosts
172.25.15.1 server1 reg.westos.org	# resolve the private registry
[root@server8 ~]# vim /etc/hosts
172.25.15.1 server1 reg.westos.org
[root@server9 ~]# vim /etc/hosts
172.25.15.1 server1 reg.westos.org
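A quick way to confirm the entry resolves on each node (a sketch):

[root@server7 ~]# getent hosts reg.westos.org
172.25.15.1     server1 reg.westos.org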


3. Default registry configuration

[root@server1 harbor]# cd /etc/docker/
[root@server1 docker]# ls
certs.d  daemon.json  key.json
[root@server1 docker]# scp daemon.json server7:/etc/docker/
[root@server7 ~]# cd /etc/docker/
[root@server7 docker]# ls
key.json
[root@server7 docker]# ls
daemon.json  key.json
[root@server7 ~]# vim /etc/docker/daemon.json 
[root@server7 ~]# cat /etc/docker/daemon.json
{
  "registry-mirrors": ["https://reg.westos.org"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
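The exec-opts entry matters because kubeadm expects kubelet and the container runtime to agree on the systemd cgroup driver. After a restart this can be verified (a sketch):

[root@server7 ~]# systemctl restart docker
[root@server7 ~]# docker info | grep -i cgroup
 Cgroup Driver: systemd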


4. Configure sysctl for docker to avoid errors

[root@server1 docker]# cd /etc/sysctl.d
[root@server1 sysctl.d]# ls
99-sysctl.conf  docker.conf
[root@server1 sysctl.d]# scp docker.conf server7:/etc/sysctl.d
root@server7's password: 
docker.conf                    100%   79    93.2KB/s   00:00    
[root@server1 sysctl.d]# 
[root@server7 docker]# sysctl --system	# apply the new settings immediately
[root@server7 docker]# systemctl restart docker
[root@server7 docker]# docker info
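The transcript never shows docker.conf itself; a typical version letting bridged pod traffic traverse iptables (an assumption, not the actual file from 172.25.15.250) would be:

# /etc/sysctl.d/docker.conf (assumed contents)
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1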


5. Certificates

[root@server1 sysctl.d]# cd /etc/docker/
[root@server1 docker]# ls
certs.d  daemon.json  key.json
[root@server1 docker]# scp  -r certs.d/ server7:/etc/docker/
root@server7's password: 
ca.crt                         100% 2106     1.3MB/s   00:00    
[root@server1 docker]# 


6. Test pulling an image from the registry

[root@server7 docker]# docker pull busybox
[root@server7 docker]# docker images
REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
busybox             latest              59788edf1f3e        2 years ago         1.15MB

[root@server7 docker]# modprobe ip_vs
[root@server7 docker]# lsmod | grep ip_vs
ip_vs                 145497  0 
nf_conntrack          133095  7 ip_vs,nf_nat,nf_nat_ipv4,xt_conntrack,nf_nat_masquerade_ipv4,nf_conntrack_netlink,nf_conntrack_ipv4
libcrc32c              12644  4 xfs,ip_vs,nf_nat,nf_conntrack
[root@server7 docker]# 
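modprobe only loads ip_vs until the next reboot. To make the modules persistent, a modules-load.d entry can be added (a sketch; this step is an addition, not part of the original transcript):

[root@server7 docker]# cat > /etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
EOF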


7. Download the deployment packages

[root@foundation15 ~]# lftp 172.25.254.250
lftp 172.25.254.250:~> cd pub/docs/k8s
cd ok, cwd=/pub/docs/k8s
lftp 172.25.254.250:/pub/docs/k8s> get kubeadm-1.21.3.tar.gz 
65271261 bytes transferred                          
lftp 172.25.254.250:/pub/docs/k8s> exit
[root@foundation15 ~]# scp kubeadm-1.21.3.tar.gz 172.25.15.7:

[root@server7 ~]# ls
kubeadm-1.21.3.tar.gz
[root@server7 ~]# tar axf kubeadm-1.21.3.tar.gz 
[root@server7 ~]# ls
kubeadm-1.21.3.tar.gz  packages
[root@server7 ~]# cd packages/
[root@server7 packages]# ls
14bfe6e75a9efc8eca3f638eb22c7e2ce759c67f95b43b16fae4ebabde1549f3-cri-tools-1.13.0-0.x86_64.rpm
23f7e018d7380fc0c11f0a12b7fda8ced07b1c04c4ba1c5f5cd24cd4bdfb304d-kubeadm-1.21.3-0.x86_64.rpm
7e38e980f058e3e43f121c2ba73d60156083d09be0acc2e5581372136ce11a1c-kubelet-1.21.3-0.x86_64.rpm
b04e5387f5522079ac30ee300657212246b14279e2ca4b58415c7bf1f8c8a8f5-kubectl-1.21.3-0.x86_64.rpm
db7cb5cb0b3f6875f54d10f02e625573988e3e91fd4fc5eef0b1876bb18604ad-kubernetes-cni-0.8.7-0.x86_64.rpm
[root@server7 packages]# yum install -y *	# install every rpm in the directory

[root@server7 packages]# systemctl enable --now kubelet.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.


8. server8 and server9

## both server8 and server9 need docker-ce installed and the service enabled
[root@server8 ~]# yum install -y docker-ce
[root@server9 ~]# systemctl enable --now docker.service
[root@server7 ~]# ls
kubeadm-1.21.3.tar.gz  kubeadm-init.yaml  packages
[root@server7 ~]# scp -r  packages/ 172.25.15.8:
[root@server7 ~]# scp -r  packages/ 172.25.15.9:
[root@server8 ~]# ls
packages
[root@server8/9 ~]# cd packages/
[root@server8/9 packages]# yum install -y *
[root@server8/9 packages]# systemctl enable --now kubelet.service
## Send the registry config, the sysctl file, and the certificates set up on server7 to server8 and server9
### Default registry
[root@server7 docker]# scp daemon.json 172.25.15.8:/etc/docker
root@172.25.15.8's password: 
daemon.json                                    100%  275   213.6KB/s   00:00    
[root@server7 docker]# scp daemon.json 172.25.15.9:/etc/docker
root@172.25.15.9's password: 
daemon.json                                    100%  275   195.6KB/s   00:00 
### Certificates
[root@server7 docker]# ls
certs.d  daemon.json  key.json
[root@server7 docker]# scp -r certs.d/ 172.25.15.8:/etc/docker
root@172.25.15.8's password: 
ca.crt                                         100% 2106     2.2MB/s   00:00    
[root@server7 docker]# scp -r certs.d/ 172.25.15.9:/etc/docker
root@172.25.15.9's password: 
ca.crt                                         100% 2106     2.4MB/s   00:00 
### Avoiding errors (sysctl)
[root@server7 docker]# cd /etc/sysctl.d
[root@server7 sysctl.d]# ls
99-sysctl.conf  docker.conf
[root@server7 sysctl.d]# scp docker.conf 172.25.15.8:/etc/sysctl.d
root@172.25.15.8's password: 
docker.conf                                    100%   79    60.0KB/s   00:00    
[root@server7 sysctl.d]# scp docker.conf 172.25.15.9:/etc/sysctl.d
root@172.25.15.9's password: 
docker.conf                                    100%   79    67.2KB/s   00:00    
[root@server7 sysctl.d]# 
[root@server8/9 docker]# sysctl --system
[root@server8/9 docker]# systemctl restart docker.service 


9. Initialization

[root@server7 packages]# cd
[root@server7 ~]# kubeadm config print init-defaults >kubeadm-init.yaml
[root@server7 ~]# ls
kubeadm-1.21.3.tar.gz  kubeadm-init.yaml  packages
[root@server7 ~]# vim kubeadm-init.yaml
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.25.15.7
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: server7
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "172.25.15.100:6443"
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: reg.westos.org/k8s
kind: ClusterConfiguration
kubernetesVersion: 1.21.3
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs

[root@server7 ~]# kubeadm config images pull --config kubeadm-init.yaml 
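Two fields in the file do the heavy lifting: controlPlaneEndpoint points at the haproxy VIP from part I, and imageRepository redirects all control-plane images to the private registry. The exact image set the config resolves to can be previewed without pulling (a sketch):

[root@server7 ~]# kubeadm config images list --config kubeadm-init.yaml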

[root@server7 ~]# swapoff -a	# kubelet refuses to start while swap is on
[root@server7 ~]# vim /etc/fstab	# comment out the swap line so it stays off after reboot
[root@server7 ~]# kubeadm init --config kubeadm-init.yaml --upload-certs	# initialize the first control plane
[root@server7 ~]# export KUBECONFIG=/etc/kubernetes/admin.conf
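The export only lasts for the current shell; to keep kubectl usable across logins, one option (an assumption, not shown in the original) is to persist it:

[root@server7 ~]# echo 'export KUBECONFIG=/etc/kubernetes/admin.conf' >> ~/.bash_profile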



If initialization fails, clean up the files left over from the failed attempt before retrying:
kubeadm reset


10. Install the flannel network add-on (before joining nodes)

[root@server7 ~]# lftp 172.25.254.250
lftp 172.25.254.250:~> cd pub/docs/k8s/
cd ok, cwd=/pub/docs/k8s
lftp 172.25.254.250:/pub/docs/k8s> get kube-flannel.yml 
4783 bytes transferred
lftp 172.25.254.250:/pub/docs/k8s> exit
[root@server7 ~]# ls
kubeadm-1.21.3.tar.gz  kubeadm-init.yaml  kube-flannel.yml  packages
[root@server7 ~]# vim kube-flannel.yml 
[root@server7 ~]# echo "source <(kubectl completion bash)" >> ~/.bashrc 
[root@server7 ~]# kubectl apply -f kube-flannel.yml
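Once applied, flannel should run one pod per node; the app=flannel label used by the stock kube-flannel.yml of this era makes them easy to list (a sketch):

[root@server7 ~]# kubectl -n kube-system get pod -l app=flannel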


11. Join the remaining nodes

[root@server8 network-scripts]# kubeadm join 172.25.15.100:6443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:eb2e1d5a951eef9978f1c7a0f7774714d966331e02a38c4c8fc0893d2ec53f84 --control-plane --certificate-key 081acc5ee7d52afcd9e4d098703097c310e69564aea64b7e73a84a91b5fc3364
[root@server9 network-scripts]# kubeadm join 172.25.15.100:6443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:eb2e1d5a951eef9978f1c7a0f7774714d966331e02a38c4c8fc0893d2ec53f84 --control-plane --certificate-key 081acc5ee7d52afcd9e4d098703097c310e69564aea64b7e73a84a91b5fc3364
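Both joins use --control-plane together with --certificate-key, so server8 and server9 come up as additional control-plane nodes behind the VIP. A plain worker would join with only the token and CA hash (a sketch; hash abbreviated):

kubeadm join 172.25.15.100:6443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:<hash>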

[root@server7 ~]# kubectl get pod -n kube-system
NAME                              READY   STATUS    RESTARTS   AGE
coredns-7777df944c-c8w55          1/1     Running   0          11m
coredns-7777df944c-zkhjq          1/1     Running   0          11m
etcd-server7                      1/1     Running   0          11m
etcd-server8                      1/1     Running   0          9m7s
etcd-server9                      1/1     Running   0          10m
kube-apiserver-server7            1/1     Running   0          11m
kube-apiserver-server8            1/1     Running   2          8m56s
kube-apiserver-server9            1/1     Running   0          10m
kube-controller-manager-server7   1/1     Running   1          11m
kube-controller-manager-server8   1/1     Running   0          8m26s
kube-controller-manager-server9   1/1     Running   0          10m
kube-flannel-ds-6m89f             1/1     Running   0          9m17s
kube-flannel-ds-n9gww             1/1     Running   0          10m
kube-flannel-ds-tns2f             1/1     Running   1          10m
kube-proxy-76jh6                  1/1     Running   0          9m17s
kube-proxy-nt7cg                  1/1     Running   0          10m
kube-proxy-v6q69                  1/1     Running   0          11m
kube-scheduler-server7            1/1     Running   1          11m
kube-scheduler-server8            1/1     Running   0          8m15s
kube-scheduler-server9            1/1     Running   0          10m
[root@server7 ~]# kubectl get node
NAME      STATUS   ROLES                  AGE     VERSION
server7   Ready    control-plane,master   11m     v1.21.3
server8   Ready    control-plane,master   9m26s   v1.21.3
server9   Ready    control-plane,master   10m     v1.21.3
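With kube-proxy in ipvs mode, the virtual server table can be inspected on any node (a sketch; the ipvsadm tool is not installed anywhere in the transcript):

[root@server7 ~]# yum install -y ipvsadm
[root@server7 ~]# ipvsadm -ln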


12. Browser access: http://172.25.15.6/status

The k8s backend nodes are now visible in the haproxy stats page.

