Configuring the cluster in a distributed manner
Lab environment:
server1: 172.25.1.1, haproxy + keepalived, master
server2: 172.25.1.2, haproxy + keepalived, master
server3: 172.25.1.4, haproxy + keepalived, master
node: 172.25.1.20, worker node
Each node has 2 CPUs.

1. Load balancer deployment

yum install -y keepalived haproxy    ## install keepalived and haproxy on all three master nodes
haproxy configuration

[root@server1 ~]# cd /etc/haproxy/
[root@server1 haproxy]# ls
haproxy.cfg
[root@server1 haproxy]# vim haproxy.cfg 
[root@server1 haproxy]# systemctl enable --now haproxy.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/haproxy.service to /usr/lib/systemd/system/haproxy.service.
[root@server1 haproxy]# scp haproxy.cfg server2:/etc/haproxy/
haproxy.cfg                                                                                                                 100% 2728     3.6MB/s   00:00    
[root@server1 haproxy]# scp haproxy.cfg server3:/etc/haproxy/
haproxy.cfg                                                                                                                 100% 2728     3.7MB/s   00:00    

Note: when haproxy runs on the same host as a k8s master node it cannot bind to port 6443, because the local kube-apiserver already listens there; a different port (8443 in this setup) has to be used, as in the sketch below.
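A minimal sketch of the relevant haproxy.cfg section, assuming the frontend binds to 8443 (the local kube-apiserver owns 6443) and the backend lists the three masters from the lab environment above; names such as main and apiserver are illustrative:

frontend  main *:8443
    mode tcp
    default_backend             apiserver

backend apiserver
    balance     roundrobin
    mode tcp
    server  app1 172.25.1.1:6443 check
    server  app2 172.25.1.2:6443 check
    server  app3 172.25.1.4:6443 check

The kubeadm join command in section 4 points at 172.25.1.100:8443, which is consistent with this frontend port.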


2. keepalived configuration

[root@server1 ~]# cd /etc/keepalived/
[root@server1 keepalived]# ls
keepalived.conf
[root@server1 keepalived]# vim keepalived.conf 
[root@server1 keepalived]# cat keepalived.conf
! Configuration File for keepalived
global_defs {
   notification_email {
     root@localhost
   }
   notification_email_from keepalived@localhost
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
   vrrp_skip_check_adv_addr
#   vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}
vrrp_script check_haproxy {    
    script "/etc/keepalived/check_haproxy.sh"    ##调用脚本
    interval 5 
}

vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    track_script {        ## track the result of the script above
        check_haproxy    
    }

    virtual_ipaddress {
        172.25.1.100
    }
}

[root@server1 keepalived]# vim check_haproxy.sh
[root@server1 keepalived]# cat check_haproxy.sh
#!/bin/bash 
systemctl status haproxy &> /dev/null 
if [ $? != 0 ];then    
   systemctl stop keepalived 
fi
[root@server1 keepalived]# chmod +x check_haproxy.sh
[root@server1 keepalived]# systemctl enable --now keepalived.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/keepalived.service to /usr/lib/systemd/system/keepalived.service.
[root@server1 keepalived]# scp * server2:/etc/keepalived/
check_haproxy.sh                                                                                                            100%  109    53.1KB/s   00:00    
keepalived.conf                                                                                                             100%  722   287.9KB/s   00:00    
[root@server1 keepalived]# scp * server3:/etc/keepalived/
check_haproxy.sh                                                                                                            100%  109    69.9KB/s   00:00    
keepalived.conf                                                                                                             100%  722   332.0KB/s   00:00  
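On server2 and server3 the copied keepalived.conf still needs per-node adjustments; a sketch of the usual changes, with illustrative priority values:

[root@server2 keepalived]# vim keepalived.conf
    state BACKUP                # server1 remains MASTER
    priority 90                 # server3 could use e.g. 80, lower still
[root@server2 keepalived]# systemctl enable --now keepalived.service
(the same on server3 with its own priority)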

Starting haproxy again on server1 lets keepalived preempt, and the VIP moves back to server1.
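A quick way to follow the failover is to watch which node currently holds the VIP; a sketch:

[root@server1 ~]# ip addr show eth0 | grep 172.25.1.100      # the VIP sits on the current MASTER
[root@server1 ~]# systemctl stop haproxy                     # check_haproxy.sh then stops keepalived, the VIP moves to a backup
[root@server1 ~]# systemctl start haproxy keepalived         # server1 preempts and takes the VIP back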


3. Docker deployment

Deploy the Docker engine on all three nodes.

[root@server1 ~]# ls
containerd.io-1.2.13-3.2.el7.x86_64.rpm  docker-ce-19.03.11-3.el7.x86_64.rpm
container-selinux-2.77-1.el7.noarch.rpm  docker-ce-cli-19.03.11-3.el7.x86_64.rpm
[root@server1 ~]# yum install -y *

[root@server1 ~]# systemctl enable --now docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
[root@server1 ~]# cd /etc/docker/
[root@server1 docker]# ls
key.json
[root@server1 docker]# vim daemon.json
[root@server1 docker]# cat daemon.json 
{  
  "registry-mirrors": ["https://reg.red.org"],
  "exec-opts": ["native.cgroupdriver=systemd"],  
  "log-driver": "json-file",  
  "log-opts": {    
    "max-size": "100m"  
  },  
  "storage-driver": "overlay2",  
  "storage-opts": [    
    "overlay2.override_kernel_check=true"  
  ] 
}
[root@server1 docker]# vim /etc/hosts
[root@server1 docker]# tail -1 /etc/hosts
172.25.1.11 reg.red.org
[root@server1 docker]# systemctl reload docker.service 
[root@server1 docker]# scp /etc/docker/daemon.json server2:/etc/docker/
daemon.json                                                                                           100%  293   147.0KB/s   00:00    
[root@server1 docker]# scp /etc/docker/daemon.json server3:/etc/docker/
daemon.json                                                                                           100%  293   295.6KB/s   00:00    
[root@server1 docker]# scp /etc/hosts server2:/etc/hosts
hosts                                                                                                 100%  270   272.6KB/s   00:00    
[root@server1 docker]# scp /etc/hosts server3:/etc/hosts
hosts                                                                                                 100%  270   351.1KB/s   00:00    
[root@server1 docker]# ls
ca.crt  daemon.json  key.json
[root@server1 docker]# scp ca.crt server2:/etc/docker/certs.d/reg.red.org
ca.crt                                                                                                100% 2082     1.8MB/s   00:00    
[root@server1 docker]# scp ca.crt server3:/etc/docker/certs.d/reg.red.org
ca.crt                                                                                                100% 2082     2.6MB/s   00:00    
[root@server1 docker]# systemctl restart docker
[root@server1 docker]# ssh server2 'systemctl restart docker'
[root@server1 docker]# ssh server3 'systemctl restart docker'
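After the restart it is worth confirming on every node that docker really switched to the systemd cgroup driver (kubeadm warns later if it is still cgroupfs); for example:

[root@server1 docker]# docker info | grep -i cgroup
 Cgroup Driver: systemd
[root@server1 docker]# ssh server2 'docker info | grep -i cgroup'
[root@server1 docker]# ssh server3 'docker info | grep -i cgroup'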


[root@server1 ~]# vim /etc/sysctl.d/k8s.conf
[root@server1 ~]# cat /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1 
net.bridge.bridge-nf-call-iptables = 1 
[root@server1 ~]# sysctl --system
* Applying /usr/lib/sysctl.d/00-system.conf ...
* Applying /usr/lib/sysctl.d/10-default-yama-scope.conf ...
kernel.yama.ptrace_scope = 0
* Applying /usr/lib/sysctl.d/50-default.conf ...
kernel.sysrq = 16
kernel.core_uses_pid = 1
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.conf.all.accept_source_route = 0
net.ipv4.conf.default.promote_secondaries = 1
net.ipv4.conf.all.promote_secondaries = 1
fs.protected_hardlinks = 1
fs.protected_symlinks = 1
* Applying /etc/sysctl.d/99-sysctl.conf ...
* Applying /etc/sysctl.d/k8s.conf ...
* Applying /etc/sysctl.conf ...
[root@server1 ~]# systemctl daemon-reload
[root@server1 ~]# scp /etc/sysctl.d/k8s.conf server2:/etc/sysctl.d/
k8s.conf                                                                                                                    100%   81   116.8KB/s   00:00    
[root@server1 ~]# scp /etc/sysctl.d/k8s.conf server3:/etc/sysctl.d/
k8s.conf                                                                                                                    100%   81   110.3KB/s   00:00  
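The file is only copied above; it still has to be loaded on the other two nodes, for example:

[root@server1 ~]# ssh server2 'sysctl --system'
[root@server1 ~]# ssh server3 'sysctl --system'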



4. kubeadm deployment

Load the kernel modules (kube-proxy will run in IPVS mode):

[root@server1 ~]# yum install -y ipvsadm
[root@server1 ~]# ssh server2 'yum install -y ipvsadm'
[root@server1 ~]# ssh server3 'yum install -y ipvsadm'
[root@server1 ~]# modprobe ip_vs_rr
[root@server1 ~]# modprobe ip_vs_sh
[root@server1 ~]# modprobe ip_vs_wrr
[root@server1 ~]# lsmod |grep ip_vs
ip_vs_wrr              12697  0 
ip_vs_sh               12688  0 
ip_vs_rr               12600  0 
ip_vs                 145497  6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack          133095  7 ip_vs,nf_nat,nf_nat_ipv4,xt_conntrack,nf_nat_masquerade_ipv4,nf_conntrack_netlink,nf_conntrack_ipv4
libcrc32c              12644  4 xfs,ip_vs,nf_nat,nf_conntrack
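modprobe only loads the modules into the running kernel. To have them load again after a reboot they can be listed in a modules-load.d file (a sketch; the filename is arbitrary) and the file copied to the other masters:

[root@server1 ~]# cat > /etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_rr
ip_vs_sh
ip_vs_wrr
EOF
[root@server1 ~]# scp /etc/modules-load.d/ipvs.conf server2:/etc/modules-load.d/
[root@server1 ~]# scp /etc/modules-load.d/ipvs.conf server3:/etc/modules-load.d/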

[root@server1 ~]# vim /etc/sysctl.d/k8s.conf
[root@server1 ~]# cat /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1 
net.bridge.bridge-nf-call-iptables = 1 
net.ipv4.ip_forward = 1 
vm.swappiness=0
[root@server1 ~]# scp /etc/sysctl.d/k8s.conf server2:/etc/sysctl.d/k8s.conf
k8s.conf                                                                                              100%  123   154.6KB/s   00:00    
[root@server1 ~]# scp /etc/sysctl.d/k8s.conf server3:/etc/sysctl.d/k8s.conf
k8s.conf                                                                                              100%  123   164.6KB/s   00:00    
[root@server1 ~]# 
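vm.swappiness=0 only discourages swapping; kubelet by default refuses to run with swap enabled, so swap is normally turned off outright on every node. A sketch, assuming swap is defined in /etc/fstab:

[root@server1 ~]# swapoff -a
[root@server1 ~]# sed -i '/swap/s/^/#/' /etc/fstab      # comment out the swap entry so it stays off after reboot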



Install kubeadm

[root@server1 ~]# vim /etc/yum.repos.d/k8s.repo ##configure the installation repo
[root@server1 ~]# cat /etc/yum.repos.d/k8s.repo
[kubernetes] 
name=Kubernetes 
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/ 
enabled=1 
gpgcheck=0
[root@server1 ~]# yum repolist
Loaded plugins: product-id, search-disabled-repos, subscription-manager
This system is not registered with an entitlement server. You can use subscription-manager to register.
kubernetes                                                                                                       | 1.4 kB  00:00:00     
kubernetes/primary                                                                                               |  73 kB  00:00:00     
kubernetes                                                                                                                      533/533
repo id                                                          repo name                                                        status
kubernetes                                                       Kubernetes                                                         533
rhel7source                                                      rhel7source                                                      5,152
repolist: 5,685
[root@server1 ~]# scp /etc/yum.repos.d/k8s.repo server2:/etc/yum.repos.d/k8s.repo
k8s.repo                                                                                              100%  133   172.4KB/s   00:00    
[root@server1 ~]# scp /etc/yum.repos.d/k8s.repo server3:/etc/yum.repos.d/k8s.repo
k8s.repo                                                                                              100%  133   171.9KB/s   00:00   
[root@server1 ~]# yum install -y kubeadm-1.18.3-0 kubelet-1.18.3-0 kubectl-1.18.3-0
[root@server1 ~]# systemctl enable --now kubelet
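The same packages are installed and kubelet enabled on the other two masters as well, for example:

[root@server1 ~]# ssh server2 'yum install -y kubeadm-1.18.3-0 kubelet-1.18.3-0 kubectl-1.18.3-0 && systemctl enable --now kubelet'
[root@server1 ~]# ssh server3 'yum install -y kubeadm-1.18.3-0 kubelet-1.18.3-0 kubectl-1.18.3-0 && systemctl enable --now kubelet'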


[root@server1 ~]# echo "source <(kubectl completion bash)" >> ~/.bashrc
[root@server1 ~]# source .bashrc


[root@server1 ~]# kubeadm config print init-defaults > kubeadm-init.yaml 
[root@server1 ~]# ls
kubeadm-init.yaml
[root@server1 ~]# vim kubeadm-init.yaml 
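A sketch of the kubeadm-init.yaml fields that typically change for this setup; the imageRepository value is an assumption based on the reg.red.org mirror configured earlier, while the controlPlaneEndpoint matches the join command used below:

apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.18.3
controlPlaneEndpoint: "172.25.1.100:8443"      # the VIP fronted by haproxy
imageRepository: reg.red.org/library           # assumption: local mirror of the k8s images
networking:
  podSubnet: 10.244.0.0/16                     # flannel's default pod network
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs                                     # kube-proxy in IPVS mode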

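The control plane is then initialized roughly as follows; --upload-certs is what generates the --certificate-key used in the join command below:

[root@server1 ~]# kubeadm init --config kubeadm-init.yaml --upload-certs
[root@server1 ~]# export KUBECONFIG=/etc/kubernetes/admin.conf      # let kubectl talk to the new cluster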

[root@server2 ~]# kubeadm join 172.25.1.100:8443 --token abcdef.0123456789abcdef \
> --discovery-token-ca-cert-hash sha256:5fe6840acf42d0bbd7b1e3e12cb14f3fcd67873934ec0cf7b184b6907a05462f \
> --control-plane --certificate-key 62a49c4fcd8f07397a3186d4eef886d241b2644923aed90da69c7876ed31e3f3


[root@server1 ~]# wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml ##deploy the pod network
[root@server1 ~]# ls
kubeadm-init.yaml  kube-flannel.yml
[root@server1 ~]# kubectl apply -f kube-flannel.yml 
podsecuritypolicy.policy/psp.flannel.unprivileged configured
clusterrole.rbac.authorization.k8s.io/flannel unchanged
clusterrolebinding.rbac.authorization.k8s.io/flannel unchanged
serviceaccount/flannel unchanged
configmap/kube-flannel-cfg configured
daemonset.apps/kube-flannel-ds-amd64 unchanged
daemonset.apps/kube-flannel-ds-arm64 unchanged
daemonset.apps/kube-flannel-ds-arm unchanged
daemonset.apps/kube-flannel-ds-ppc64le unchanged
daemonset.apps/kube-flannel-ds-s390x unchanged
[root@server1 ~]# kubectl get nodes 
NAME      STATUS   ROLES    AGE   VERSION
server1   Ready    master   91m   v1.18.3
server2   Ready    master   58m   v1.18.3
server3   Ready    master   53m   v1.18.3

[root@server1 ~]# kubectl -n kube-system describe cm kubeadm-config 
Name:         kubeadm-config
Namespace:    kube-system
Labels:       <none>
Annotations:  <none>
Data
====
ClusterStatus:
----
apiEndpoints:
  server1:
    advertiseAddress: 172.25.1.1
    bindPort: 6443
  server2:
    advertiseAddress: 192.168.43.12
    bindPort: 6443
  server3:
    advertiseAddress: 192.168.43.13
    bindPort: 6443
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterStatus



Adding one more (worker) node; its join command is sketched below.
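The worker (172.25.1.20) joins with the same token and CA hash as above, but without the --control-plane and --certificate-key flags:

[root@node ~]# kubeadm join 172.25.1.100:8443 --token abcdef.0123456789abcdef \
> --discovery-token-ca-cert-hash sha256:5fe6840acf42d0bbd7b1e3e12cb14f3fcd67873934ec0cf7b184b6907a05462f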


5. Testing cluster high availability

1. Run a container

[root@server1 ~]# kubectl run demo --images=busybox -it
Error: unknown flag: --images
See 'kubectl run --help' for usage.
[root@server1 ~]# kubectl run demo --image=busybox -it
If you don't see a command prompt, try pressing enter.
/ # ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
3: eth0@if7: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue 
    link/ether a6:08:7d:47:2e:e6 brd ff:ff:ff:ff:ff:ff
    inet 10.244.3.2/24 brd 10.244.3.255 scope global eth0
       valid_lft forever preferred_lft forever
/ # 
Session ended, resume using 'kubectl attach demo -c demo -i -t' command when the pod is running
[root@server1 ~]# kubectl get pod -o wide
NAME   READY   STATUS    RESTARTS   AGE   IP           NODE   NOMINATED NODE   READINESS GATES
demo   1/1     Running   1          30s   10.244.3.2   node   <none>           <none>



2. Take down the current master
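The essence of the test is roughly the following, assuming server1 currently holds the VIP:

[root@server1 ~]# systemctl stop haproxy                          # or power server1 off entirely
[root@server2 ~]# ip addr show eth0 | grep 172.25.1.100           # the VIP has moved to a backup
[root@server2 ~]# export KUBECONFIG=/etc/kubernetes/admin.conf
[root@server2 ~]# kubectl get nodes                               # the cluster still answers through the VIP
[root@server2 ~]# kubectl get pod -o wide                         # the demo pod keeps running on the worker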


The following is the configuration for the case where keepalived and the masters are NOT on the same nodes:
server1 172.25.1.1 harbor registry
server2 172.25.1.2 haproxy+keepalived
server3 172.25.1.3 haproxy+keepalived
server4 172.25.1.4 master
server5 172.25.1.5 master
server6 172.25.1.6 master
server7 172.25.1.7 worker node
Configure haproxy

On server2/3:
yum install haproxy keepalived -y
haproxy configuration:
cd /etc/haproxy/
vim haproxy.cfg 
Note: when haproxy shares a host with a k8s master node it cannot use port 6443;
here they are on separate hosts, so 6443 can be used directly.
listen admin_status              # status page for browser monitoring, optional
    bind *:80                    # listen on port 80
    mode http
    stats uri /status            # reachable at http://<haproxy-address>/status

frontend  main *:6443            # the port clients (and kubeadm) will use
    mode tcp
    default_backend             apiserver
    
backend apiserver
    balance     roundrobin
    mode tcp
    server  app1 172.25.10.4:6443 check    # the backend master nodes
    server  app2 172.25.10.5:6443 check
    server  app3 172.25.10.6:6443 check

systemctl enable --now haproxy.service     # start now and enable at boot

Configure keepalived

vim keepalived.conf

! Configuration File for keepalived

global_defs {
   notification_email {
     root@localhost                                 # notification mailbox
   }
   notification_email_from keepalived@localhost     # sender mailbox
   smtp_server 127.0.0.1                            # local loopback address
   smtp_connect_timeout 30
   router_id LVS_DEVEL
   vrrp_skip_check_adv_addr
   #vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_instance VI_1 {
    state MASTER                # on the backup node, server3, use BACKUP
    interface eth0
    virtual_router_id 51
    priority 100                # on the backup node, server3, use priority 50
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    track_script {
        check_haproxy
    }

    virtual_ipaddress {
    172.25.10.100               # the VIP
    }
}

virtual_server 172.25.10.100 6443 {      # in DR mode the virtual port must match the real-server port
    delay_loop 6                         # health-check interval
    lb_algo rr                           # round-robin scheduling
    lb_kind DR                           # DR (direct routing) mode
    #persistence_timeout 50              # commented out: balance every request instead of keeping sessions
    protocol TCP                         # TCP forwarding and health checks

    real_server 172.25.10.4 6443 {
        weight 1
        TCP_CHECK {   
            connect_timeout 3        # connection timeout in seconds
            retry 3                  # number of retries after a failure
            delay_before_retry 3     # delay between retries
        }

    }
    real_server 172.25.10.5 6443 {
        weight 1
        TCP_CHECK {
            connect_timeout 3
            retry 3
            delay_before_retry 3
        }     

    }
    real_server 172.25.10.6 6443 {
        weight 1
        TCP_CHECK {
            connect_timeout 3
            retry 3
            delay_before_retry 3
        }     
              
    }
}
systemctl enable --now keepalived.service 

Add the VIP on the master nodes

yum install -y arptables_jf    ## install on all master nodes

## Add the VIP to server4 first; the other hosts should only add the VIP after they have joined as
## additional masters and the load-balancer nodes can see them as healthy backends.
[root@server4 ~]# ip addr add 172.25.1.100/24 dev ens33    ## add temporarily (not persistent across reboots)
[root@server4 ~]# arptables -A INPUT -d 172.25.1.100 -j DROP    ## do not answer ARP requests for the VIP
[root@server4 ~]# arptables -A OUTPUT -s 172.25.1.100 -j mangle --mangle-ip-s 172.25.10.4    ## rewrite ARP source: VIP -> 172.25.10.4
[root@server4 ~]# arptables-save >  /etc/sysconfig/arptables    ## save the rules
[root@server4 ~]# cat /etc/sysconfig/arptables    ## view them
*filter
:INPUT ACCEPT
:OUTPUT ACCEPT
:FORWARD ACCEPT
-A INPUT -j DROP -d 172.25.1.100 
-A OUTPUT -j mangle -s 172.25.1.100 --mangle-ip-s 172.25.1.4 
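Once server5 and server6 have joined as additional masters they need the analogous VIP and arptables rules with their own real IPs; a sketch for server5 (server6 is the same with its own address):

[root@server5 ~]# ip addr add 172.25.1.100/24 dev ens33
[root@server5 ~]# arptables -A INPUT -d 172.25.1.100 -j DROP
[root@server5 ~]# arptables -A OUTPUT -s 172.25.1.100 -j mangle --mangle-ip-s 172.25.10.5
[root@server5 ~]# arptables-save > /etc/sysconfig/arptables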