Host inventory

master: 192.168.27.10    docker  kubelet  kubectl  kubeadm  haproxy  keepalived

master1: 192.168.27.20   docker  kubelet  kubectl  kubeadm  haproxy  keepalived

node1: 192.168.27.30     docker  kubelet  kubeadm

node2: 192.168.27.40     docker  kubelet  kubeadm

Set the hostnames

hostnamectl set-hostname master

hostnamectl set-hostname master1

hostnamectl set-hostname node1

hostnamectl set-hostname node2

Add the corresponding hostname resolution entries (on all nodes)

cat >> /etc/hosts << EOF
192.168.27.10 master
192.168.27.20 master1
192.168.27.30 node1
192.168.27.40 node2
EOF

Firewall and SELinux

systemctl stop firewalld

systemctl disable firewalld

setenforce 0

sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config

Time synchronization

timedatectl set-timezone Asia/Shanghai;timedatectl set-local-rtc 0
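
Note that timedatectl above only sets the time zone and the RTC mode; it does not actually keep the clocks of the nodes in sync. If you want real time synchronization (my addition, not part of the original steps), a minimal sketch with chrony looks like this:

yum -y install chrony
systemctl enable --now chronyd
chronyc sources    # confirm the node is syncing against an NTP server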

Disable swap (kubelet will not start with swap enabled by default, and swapping to disk hurts performance)

swapoff -a && sed -i '/swap/s/^/#/' /etc/fstab      

free -h    # verify swap is disabled

Configure passwordless SSH from master to master1, node1 and node2

ssh-keygen -t rsa

ssh-copy-id root@master1

ssh-copy-id root@node1

ssh-copy-id root@node2
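
With passwordless SSH in place, files such as /etc/hosts can be pushed from master to the other machines in one go. A small sketch (using the IPs from the inventory above, since the remote hosts may not resolve the names yet):

for h in 192.168.27.20 192.168.27.30 192.168.27.40; do
    scp /etc/hosts root@$h:/etc/hosts
done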

Tune kernel parameters

[root@master ~]# modprobe br_netfilter

cat > /etc/sysctl.d/kubernetes.conf << EOF
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
net.ipv4.tcp_tw_recycle = 0
vm.swappiness = 0
vm.overcommit_memory = 1
vm.panic_on_oom = 0
fs.inotify.max_user_instances = 8192
fs.inotify.max_user_watches = 1048576
fs.file-max = 52706963
fs.nr_open = 52706963
net.ipv6.conf.all.disable_ipv6 = 1
net.netfilter.nf_conntrack_max = 2310720
EOF

sysctl -p /etc/sysctl.d/kubernetes.conf

# If this command complains that some file or directory does not exist, make sure br_netfilter is loaded (see modprobe above) or restart docker, then run it again

[root@master ~]# scp /etc/sysctl.d/kubernetes.conf node1:/etc/sysctl.d/

[root@master ~]# scp /etc/sysctl.d/kubernetes.conf node2:/etc/sysctl.d/

[root@master ~]# scp /etc/sysctl.d/kubernetes.conf master1:/etc/sysctl.d/

[root@node1 ~]# sysctl -p /etc/sysctl.d/kubernetes.conf

[root@node2 ~]# sysctl -p /etc/sysctl.d/kubernetes.conf

[root@master1 ~]# sysctl -p /etc/sysctl.d/kubernetes.conf
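
One more note on this step: modprobe br_netfilter does not survive a reboot, so the bridge sysctls above can fail again after restarting a node. A common fix (an assumption on my part, not in the original notes) is to load the module automatically at boot:

cat > /etc/modules-load.d/k8s.conf << EOF
br_netfilter
EOF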

Install Docker on all servers

Prerequisites

yum install -y yum-utils device-mapper-persistent-data lvm2

Add the Aliyun Docker CE repo (yum-config-manager comes from yum-utils, so install the prerequisites first)

yum-config-manager \
    --add-repo \
    http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

Install Docker 19.03.10

yum -y install docker-ce-19.03.10 docker-ce-cli-19.03.10 containerd.io

 systemctl start docker
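
You will most likely also want Docker to start automatically at boot, otherwise kubelet will fail after a node restart:

systemctl enable docker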

Change Docker's default cgroup driver

Only one line needs to be added to /etc/docker/daemon.json; here it is written together with the registry mirror (accelerator) setting.

mkdir -p /etc/docker

cat > /etc/docker/daemon.json << EOF
{
  "registry-mirrors": ["https://1dmptu91.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF

systemctl daemon-reload

systemctl restart docker
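
To confirm that the cgroup driver change took effect, check docker info; it should report systemd:

docker info | grep -i "cgroup driver"    # expected: Cgroup Driver: systemd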

Install haproxy and keepalived on both master nodes

yum -y install haproxy keepalived

# Primary master node

mv  /etc/keepalived/keepalived.conf  /etc/keepalived/keepalived.conf.bak

vim /etc/keepalived/keepalived.conf

global_defs {
    router_id lb1            # router_id must be unique per node (also delete the vrrp_strict line from the default config)
}

vrrp_script chk_haproxy {
    script "/data/check_haproxy.sh"
    interval 2
    weight 2
}

vrrp_instance VI_1 {
    state MASTER             # state: MASTER on the primary, BACKUP on the backup
    interface ens33
    virtual_router_id 51
    priority 100             # priority: the backup must be lower than the primary
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.27.254       # virtual (floating) IP shared by both masters
    }
    track_script {
        chk_haproxy          # actually run the health-check script defined above
    }
}
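
The config above references /data/check_haproxy.sh, but the script itself is not shown anywhere in these notes. A minimal sketch of what it presumably does (fail the check when haproxy is not running so keepalived can fail over); create it on both masters:

mkdir -p /data
cat > /data/check_haproxy.sh << 'EOF'
#!/bin/bash
# exit non-zero when no haproxy process is running
if ! pgrep -x haproxy > /dev/null; then
    exit 1
fi
EOF
chmod +x /data/check_haproxy.sh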

# Backup master (master1)

mv  /etc/keepalived/keepalived.conf  /etc/keepalived/keepalived.conf.bak

Copy the config over from the primary master, then change state to BACKUP, lower the priority (e.g. 90) and use a different router_id.

Modify the haproxy configuration file

Both masters use the exact same config; just replace the IPs in the last two server lines with your own two master IPs and leave the port numbers alone.

mv /etc/haproxy/haproxy.cfg  /etc/haproxy/haproxy.cfg.bak

vim /etc/haproxy/haproxy.cfg

global
    log /dev/log    local0
    log /dev/log    local1 notice
    pidfile     /var/run/haproxy.pid
    chroot /var/lib/haproxy
    stats socket /var/run/haproxy-admin.sock mode 660 level admin
    stats timeout 30s
    user haproxy
    group haproxy
    daemon
    nbproc 1

defaults
    log     global
    timeout connect 5000
    timeout client  10m
    timeout server  10m

listen  admin_stats
    bind 0.0.0.0:10080
    mode http
    log 127.0.0.1 local0 err
    stats refresh 30s
    stats uri /status
    stats realm welcome login\ Haproxy
    stats auth admin:123456
    stats hide-version
    stats admin if TRUE

listen kube-master
    bind 0.0.0.0:8443
    mode tcp
    option tcplog
    balance source
    server master 192.168.27.10:6443 check inter 2000 fall 2 rise 2 weight 1
    server master1 192.168.27.20:6443 check inter 2000 fall 2 rise 2 weight 1
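
Before starting haproxy it doesn't hurt to validate the config file syntax:

haproxy -c -f /etc/haproxy/haproxy.cfg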

Start both services on both masters

systemctl start haproxy

systemctl enable haproxy

systemctl start keepalived

systemctl enable keepalived

Check the floating (VIP) address

ip a show dev ens33

It should currently be on the first (primary) master.

Install kubelet, kubectl and kubeadm on both masters

Install kubelet and kubeadm on both worker nodes

All versions must be identical
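
The original notes skip it, but yum needs a Kubernetes repo configured before the install below can find these packages. A commonly used sketch with the Aliyun mirror (run on every node; gpgcheck is disabled to match the --nogpgcheck below):

cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
EOF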

yum install kubelet-1.18.0 kubeadm-1.18.0 kubectl-1.18.0 -y --nogpgcheck

Start and enable kubelet on all nodes

systemctl start kubelet

systemctl enable kubelet

Primary master:

Before running kubeadm init, import the pre-downloaded image files.

Load a local image file with

docker load -i <image-tar-file>

There are 8 tarballs in total; I downloaded them in advance, so just drop them in any directory and load them (see the one-liner after the list):

coredns.tar

etcd.tar

kube-apiserver.tar

kube-controller-manager.tar

kube-proxy.tar

pause.tar

flannel.tar

kube-scheduler.tar
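
To load them all in one go (assuming the tarballs are in the current directory):

for f in *.tar; do docker load -i "$f"; done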

kubeadm init

kubeadm init --kubernetes-version=v1.18.0 --image-repository registry.aliyuncs.com/google_containers --control-plane-endpoint 192.168.27.254:8443 --upload-certs --pod-network-cidr=10.244.0.0/16 --service-cidr=10.96.0.0/12

(--apiserver-advertise-address only accepts an IP with no port; for an HA setup the VIP:port goes in --control-plane-endpoint, and --upload-certs is what makes the --certificate-key control-plane join below possible.)

On success it prints something like:

The following needs to be created on both masters; do it right away:

  mkdir -p $HOME/.kube

  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Save the two join commands below from the output.

This one is for the backup master to join the control plane:

kubeadm join 192.168.27.254:8443 --token hps85k.h4zgc1vrcq0hw9bd \
    --discovery-token-ca-cert-hash sha256:8057206b9e00740b2dc52b6957b516460ecdf6168885b3502ed058349d48015d \
    --control-plane --certificate-key 3f4f9625c46eaa17a5ce99f0d64436d87d270f2719374b67036e6b8f828d178f

This one is for the worker nodes to join:

kubeadm join 192.168.27.254:8443 --token hps85k.h4zgc1vrcq0hw9bd \
    --discovery-token-ca-cert-hash sha256:8057206b9e00740b2dc52b6957b516460ecdf6168885b3502ed058349d48015d

Restart kubelet on the primary master

systemctl restart kubelet

kubectl get nodes

NAME     STATUS     ROLES    AGE    VERSION

master   NotReady   master   5m3s   v1.18.0

You can see that master's status is **NotReady**. That is because the **flannel** add-on is still missing; without the pod network the Pods cannot communicate with each other.

// Add the network add-on (flannel); it can be obtained from https://github.com/coreos/flannel

kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
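
It can take a minute or two for the flannel pods to come up; you can watch the progress with:

kubectl -n kube-system get pods -w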

Check the cluster node status again

[root@master ~]# kubectl get nodes

NAME     STATUS   ROLES    AGE   VERSION

master   Ready    master   13m   v1.18.0

Set up kubectl command-line auto-completion (install this on backup master1 as well)

yum install -y bash-completion

source /usr/share/bash-completion/bash_completion

source <(kubectl completion bash)

echo "source <(kubectl completion bash)" >> ~/.bashrc

That completes the installation on the primary master. Next up is installing node1 and node2 and joining them to the cluster; note that you should first verify the nodes already have the required images.

Both worker nodes are configured the same way.

The command below is the worker join command saved from the kubeadm init output earlier; just paste it in and run it.

[root@node1 ~]# kubeadm join 192.168.27.254:8443 --token hps85k.h4zgc1vrcq0hw9bd --discovery-token-ca-cert-hash sha256:8057206b9e00740b2dc52b6957b516460ecdf6168885b3502ed058349d48015d

The worker nodes need a few images; you can pull them manually in advance or sync them over from the master node.

First check what is already there:

docker images    # list local images

The three images below are among the ones loaded on the master before init; if they are missing here, either save them as tar files on the master and copy them over, or pull them directly:

docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.18.0

docker pull quay.io/coreos/flannel:v0.15.1

docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2

Save an image as a tar archive (example):

docker save -o nginx.tar nginx:latest
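
Put together, a small sketch of syncing one of the images listed above from master to node1 (image name/tag assumed from the pull commands above) would be:

# on master: export the image, copy it over, then load it on the node
docker save -o kube-proxy.tar registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.18.0
scp kube-proxy.tar root@node1:/root/
ssh root@node1 "docker load -i /root/kube-proxy.tar"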

Backup master master1:

The command below is the control-plane join command saved from the kubeadm init output earlier; just paste it in:

[root@master1 ~]# kubeadm join 192.168.27.254:8443 --token hps85k.h4zgc1vrcq0hw9bd \
    --discovery-token-ca-cert-hash sha256:8057206b9e00740b2dc52b6957b516460ecdf6168885b3502ed058349d48015d \
    --control-plane --certificate-key 3f4f9625c46eaa17a5ce99f0d64436d87d270f2719374b67036e6b8f828d178f

[root@master ~]# kubectl get nodes    (if nothing went wrong, just wait a while for the new nodes to become Ready)

NAME      STATUS   ROLES    AGE   VERSION
master    Ready    master   18m   v1.18.0
master1   Ready    master   18m   v1.18.0
node1     Ready    <none>   68s   v1.18.0
node2     Ready    <none>   68s   v1.18.0

> PS: Make sure all pods are in the Running state (if nothing went wrong, this just takes a little while).

// Verify the k8s components on master; output like the following means every component is installed and Running

[root@master ~]# kubectl get pod -n kube-system

Remove the taint from the master nodes so that Pods can be scheduled on them

## Running the following command once on backup master1 is enough

kubectl taint nodes --all node-role.kubernetes.io/master-
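
You can confirm the taints are gone with:

kubectl describe nodes | grep -i taint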

Verify high availability

Take down the primary master. Note that stopping kubelet alone will not release the VIP; power the node off, or stop keepalived and haproxy on it, to trigger the failover:

systemctl stop keepalived haproxy

If everything works, the floating address moves to the backup master:

ip a show dev ens33

## After the primary master goes down, the VIP normally jumps to backup master1

## The output above shows the VIP failed over successfully; now check whether the pods are still healthy.

[root@master1 ~]# kubectl get pod --all-namespaces

You can see that all pods are still in the Running state.

## Check the status of each node:

[root@master1 ~]# kubectl get nodes

NAME      STATUS     ROLES    AGE    VERSION
master    NotReady   master   2d1h   v1.18.0
master1   Ready      master   2d1h   v1.18.0
node1     Ready      <none>   2d1h   v1.18.0
node2     Ready      <none>   2d1h   v1.18.0

# You can see that master's status is now NotReady while the rest of the cluster keeps working
