k8s v1.25: three control-plane nodes + one worker, with HA etcd

# Set the hostname (repeat with the matching name on each node) and pin the IP addresses in /etc/hosts
hostnamectl set-hostname k8smaster1 && bash
vi /etc/hosts
192.168.40.180 k8smaster1
192.168.40.181 k8smaster2
192.168.40.182 k8smaster3
192.168.40.183 k8snode1

setenforce 0 && sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config && getenforce
systemctl stop firewalld && systemctl disable firewalld
swapoff -a && vi /etc/fstab   # comment out the swap line so it stays off after reboot:
#/dev/mapper/centos-swap swap                    swap    defaults        0 0
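# verify swap is fully off (the Swap row should be all zeros):
free -m | grep -i swap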

modprobe br_netfilter && lsmod | grep netfilter
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF

sysctl -p /etc/sysctl.d/k8s.conf
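# modprobe alone does not survive a reboot; a minimal persistence step so br_netfilter loads at boot:
cat > /etc/modules-load.d/k8s.conf << EOF
br_netfilter
EOF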
yum install yum-utils -y
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
vi /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0

yum install ntpdate -y
ntpdate cn.pool.ntp.org
crontab -e
# sync once an hour (the minute field must be 0; "* */1 * * *" would fire every minute)
0 */1 * * * /usr/sbin/ntpdate cn.pool.ntp.org

service crond restart
yum install wget net-tools nfs-utils lrzsz gcc gcc-c++ make cmake libxml2-devel openssl-devel curl curl-devel unzip sudo ntp libaio-devel vim ncurses-devel autoconf automake zlib-devel python-devel epel-release openssh-server socat ipvsadm conntrack yum-utils device-mapper-persistent-data lvm2 -y 
yum install containerd.io-1.6.6 -y
mkdir -p /etc/containerd && containerd config default > /etc/containerd/config.toml
# Change the Google image registry address to the Aliyun mirror
vi /etc/containerd/config.toml
sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.7"
SystemdCgroup = true
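# the same two edits, scripted (a sketch assuming the default config.toml generated above):
sed -i 's#sandbox_image = ".*"#sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.7"#' /etc/containerd/config.toml
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml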

systemctl enable containerd --now && systemctl status containerd
yum install docker-ce -y
systemctl enable docker --now && systemctl status docker
ctr image pull registry.aliyuncs.com/google_containers/pause:3.7
# point containerd at a directory of per-registry mirror configs
vi /etc/containerd/config.toml
config_path = "/etc/containerd/certs.d"

mkdir /etc/containerd/certs.d/docker.io/ -p
vim /etc/containerd/certs.d/docker.io/hosts.toml
[host."https://vh3bm52y.mirror.aliyuncs.com",host."https://registry.docker-cn.com"]
  capabilities = ["pull"]

systemctl restart containerd && systemctl status containerd
cat > /etc/docker/daemon.json << EOF
{
  "registry-mirrors":["https://registry.docker-cn.com","https://vh3bm52y.mirror.aliyuncs.com","https://docker.mirrors.ustc.edu.cn","https://dockerhub.azk8s.cn","http://hub.mirror.c.163.com"]
}
EOF
systemctl restart docker && systemctl status docker
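# confirm docker picked up the mirrors:
docker info | grep -A 5 "Registry Mirrors"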
yum install kubelet-1.25.0 kubeadm-1.25.0 kubectl-1.25.0 -y
systemctl enable kubelet
# Set up the HA load balancer (nginx + keepalived) on k8smaster1 and k8smaster2
yum install nginx keepalived nginx-mod-stream -y
vi /etc/nginx/nginx.conf
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;

include /usr/share/nginx/modules/*.conf;

events {
    worker_connections 1024;
}

# Layer-4 load balancing for the kube-apiserver on the three master nodes
stream {

    log_format  main  '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';

    access_log  /var/log/nginx/k8s-access.log  main;

    upstream k8s-apiserver {
            server 192.168.40.180:6443 weight=5 max_fails=3 fail_timeout=30s;  
            server 192.168.40.181:6443 weight=5 max_fails=3 fail_timeout=30s;
            server 192.168.40.182:6443 weight=5 max_fails=3 fail_timeout=30s;  

    }
    
    server {
       listen 16443; # nginx shares these nodes with the apiserver, so it cannot listen on 6443 or the ports would clash
       proxy_pass k8s-apiserver;
    }
}

http {
    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';

    access_log  /var/log/nginx/access.log  main;

    sendfile            on;
    tcp_nopush          on;
    tcp_nodelay         on;
    keepalive_timeout   65;
    types_hash_max_size 2048;

    include             /etc/nginx/mime.types;
    default_type        application/octet-stream;

    server {
        listen       80 default_server;
        server_name  _;

        location / {
        }
    }
}
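# validate the config before starting; this also confirms nginx-mod-stream is loadable:
nginx -t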

vi /etc/keepalived/keepalived.conf
global_defs { 
   notification_email { 
     acassen@firewall.loc 
     failover@firewall.loc 
     sysadmin@firewall.loc 
   } 
   notification_email_from Alexandre.Cassen@firewall.loc  
   smtp_server 127.0.0.1 
   smtp_connect_timeout 30 
   router_id NGINX_MASTER
} 

vrrp_script check_nginx {
    script "/etc/keepalived/check_nginx.sh"
}

vrrp_instance VI_1 { 
    state MASTER    # set to BACKUP on k8smaster2
    interface ens33  # change to the actual NIC name
    virtual_router_id 51 # VRRP router ID; unique per instance
    priority 100    # priority; set 90 on the backup server
    advert_int 1    # VRRP advertisement (heartbeat) interval, default 1s
    authentication { 
        auth_type PASS      
        auth_pass 1111 
    }  
    # virtual IP (used later as the controlPlaneEndpoint)
    virtual_ipaddress { 
        192.168.40.199/24
    } 
    track_script {
        check_nginx
    } 
}

vi /etc/keepalived/check_nginx.sh
#!/bin/bash
#1. check whether nginx is alive
counter=$(ps -ef | grep nginx | grep sbin | egrep -cv "grep|$$")
if [ "$counter" -eq 0 ]; then
    #2. if not, try to start it
    service nginx start
    sleep 2
    #3. after 2 seconds, check nginx again
    counter=$(ps -ef | grep nginx | grep sbin | egrep -cv "grep|$$")
    #4. if nginx is still down, stop keepalived so the VIP fails over
    if [ "$counter" -eq 0 ]; then
        service keepalived stop
    fi
fi

chmod +x /etc/keepalived/check_nginx.sh
systemctl daemon-reload && systemctl start nginx && systemctl start keepalived && systemctl enable nginx keepalived && systemctl status keepalived && systemctl status nginx
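# sanity-check the failover (assumes the ens33/VIP values above): the VIP should sit on
# k8smaster1 first, and move to k8smaster2 after nginx and keepalived die on k8smaster1
ip addr show ens33 | grep 192.168.40.199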
# reboot all nodes so every change takes effect cleanly
reboot -f

# Point crictl at the containerd socket on all four servers
crictl config runtime-endpoint /run/containerd/containerd.sock
kubeadm config print init-defaults > kubeadm.yaml
vim kubeadm.yaml
# Comment out localAPIEndpoint and the default node name (the VIP set as
# controlPlaneEndpoint below is used instead) and set criSocket to containerd:
#localAPIEndpoint:
#  advertiseAddress: 1.2.3.4
#  bindPort: 6443
nodeRegistration:
  criSocket: unix:///run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
#  name: node

imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.25.0
controlPlaneEndpoint: 192.168.40.199:16443
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd

# Upload the k8s image tarball to all four servers; having the images locally saves the time of pulling them from a remote registry
ctr -n=k8s.io images import k8s_1.25.0.tar.gz

ctr is containerd's built-in CLI and has the concept of namespaces. Kubernetes-related images all live in the k8s.io namespace by default, so the namespace must be set to k8s.io when importing the images.
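For example, an image imported with -n=k8s.io is invisible to a plain ctr call:
ctr images ls            # default namespace: does not show the k8s images
ctr -n k8s.io images ls  # shows the imported k8s images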

crictl images
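# optional: render all manifests without changing the node (kubeadm init supports --dry-run):
kubeadm init --config=kubeadm.yaml --dry-run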
kubeadm init --config=kubeadm.yaml --ignore-preflight-errors=SystemVerification
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
cat $HOME/.kube/config 
kubectl config view
kubectl get pods -n kube-system
# Scale out the control plane: join k8smaster2 and k8smaster3
# on each new master, create the target directories first:
cd /root/ && mkdir -p /etc/kubernetes/pki/etcd && mkdir -p ~/.kube
# on k8smaster1, copy the CA material over (repeat the scp commands for k8smaster2):
scp /etc/kubernetes/pki/ca.* k8smaster3:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/sa.* k8smaster3:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/etcd/ca.* k8smaster3:/etc/kubernetes/pki/etcd/
scp /etc/kubernetes/pki/front-proxy-ca.* k8smaster3:/etc/kubernetes/pki/
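# an alternative to the scp steps (a sketch): upload the certs as an encrypted Secret and
# pass the printed key to the join command via --certificate-key
kubeadm init phase upload-certs --upload-certs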
kubeadm token create --print-join-command
kubeadm join 192.168.40.199:16443 --token x5kijl.pmkdap8ucnxqibl1 --discovery-token-ca-cert-hash sha256:4bf1de99aa3ef5753ad137c464b860884c4f7de912c451114b6acb7ec52812ec --control-plane --ignore-preflight-errors=SystemVerification
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubectl get pods -n kube-system
# Scale out the worker node
kubeadm join 192.168.40.199:16443 --token x5kijl.pmkdap8ucnxqibl1 --discovery-token-ca-cert-hash sha256:4bf1de99aa3ef5753ad137c464b860884c4f7de912c451114b6acb7ec52812ec --ignore-preflight-errors=SystemVerification
kubectl  get nodes
kubectl label nodes k8snode1 node-role.kubernetes.io/work=work
kubectl  get nodes
# Import the calico images on all four servers
ctr -n=k8s.io images import calico.tar.gz
kubectl apply -f calico.yaml
kubectl get pods -n kube-system
kubectl get node
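# wait for calico to finish rolling out and for all nodes to go Ready
# (the DaemonSet name calico-node matches the stock manifest):
kubectl -n kube-system rollout status ds/calico-node
kubectl wait --for=condition=Ready node --all --timeout=300s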
# With multiple NICs, you can pin calico to a specific interface by editing calico.yaml
# vim calico.yaml
# find the calico-node DaemonSet env section and add:
# - name: IP_AUTODETECTION_METHOD
#   value: "interface=ens33"
# if unset, calico defaults to the first NIC it finds
# Configure etcd HA: on each control-plane node, make --initial-cluster in the static
# pod manifest list all three members
vim /etc/kubernetes/manifests/etcd.yaml
    - --initial-cluster=k8smaster1=https://192.168.40.180:2380,k8smaster3=https://192.168.40.182:2380,k8smaster2=https://192.168.40.181:2380

systemctl restart kubelet
# List the etcd cluster members
docker run --rm -it --net host -v /etc/kubernetes:/etc/kubernetes registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.4-0 etcdctl --cert /etc/kubernetes/pki/etcd/peer.crt --key /etc/kubernetes/pki/etcd/peer.key --cacert /etc/kubernetes/pki/etcd/ca.crt member list
# Check the etcd cluster health
docker run --rm -it --net host -v /etc/kubernetes:/etc/kubernetes registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.4-0 etcdctl --cert /etc/kubernetes/pki/etcd/peer.crt --key /etc/kubernetes/pki/etcd/peer.key --cacert /etc/kubernetes/pki/etcd/ca.crt --endpoints=https://192.168.40.180:2379,https://192.168.40.181:2379,https://192.168.40.182:2379 endpoint health --cluster
# Same, displayed as a table
docker run --rm -it --net host -v /etc/kubernetes:/etc/kubernetes registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.4-0 etcdctl -w table --cert /etc/kubernetes/pki/etcd/peer.crt --key /etc/kubernetes/pki/etcd/peer.key --cacert /etc/kubernetes/pki/etcd/ca.crt --endpoints=https://192.168.40.180:2379,https://192.168.40.181:2379,https://192.168.40.182:2379 endpoint status --cluster
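# the same checks also work without docker by exec'ing into a static etcd pod
# (pod name follows the etcd-<hostname> convention; the certs are mounted at the same paths):
kubectl -n kube-system exec etcd-k8smaster1 -- etcdctl --cert /etc/kubernetes/pki/etcd/peer.crt --key /etc/kubernetes/pki/etcd/peer.key --cacert /etc/kubernetes/pki/etcd/ca.crt endpoint health --cluster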
# Test cluster networking and DNS
kubectl run busybox --image docker.io/library/busybox:1.28 --image-pull-policy=IfNotPresent --restart=Never --rm -it -- sh
/ # ping www.baidu.com
/ # nslookup kubernetes.default.svc.cluster.local
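# a healthy lookup returns the ClusterIP of the kubernetes service (10.96.0.1 given the
# serviceSubnet configured earlier); cross-check with:
kubectl get svc kubernetes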



The calico manifest can be downloaded from: https://docs.projectcalico.org/manifests/calico.yaml
https://docs.tigera.io/calico/latest/getting-started/kubernetes/quickstart

  • ctr is the CLI tool that ships with containerd
  • crictl is the client for the CRI (Container Runtime Interface); it is what k8s uses to talk to containerd. crictl lacks some image-management abilities (for example, it cannot import or tag images), which is why ctr is used for the tarball imports above.
cat /etc/crictl.yaml
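# a fuller crictl.yaml you may want instead (a sketch; image-endpoint and timeout are
# common additions that the one-line crictl config command above does not write):
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10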

Removing and restoring a failed control-plane node in the k8s cluster

# Remove the failed member from etcd (replace xxxxxxxxx with its ID from 'member list'), leaving two members
docker run --rm -it --net host -v /etc/kubernetes:/etc/kubernetes registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.4-0 etcdctl --cert /etc/kubernetes/pki/etcd/peer.crt --key /etc/kubernetes/pki/etcd/peer.key --cacert /etc/kubernetes/pki/etcd/ca.crt member remove xxxxxxxxx
# on the surviving masters, drop the failed node from --initial-cluster in the manifest
vim /etc/kubernetes/manifests/etcd.yaml
systemctl restart kubelet
# on the failed node, wipe the old state before re-joining:
kubeadm reset
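# kubeadm reset does not clean CNI state or ipvs/iptables rules; the usual manual cleanup:
rm -rf /etc/cni/net.d $HOME/.kube/config
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
ipvsadm --clear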
cd /root/ && mkdir -p /etc/kubernetes/pki/etcd && mkdir -p ~/.kube
scp /etc/kubernetes/pki/ca.* k8smaster3:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/sa.* k8smaster3:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/etcd/ca.* k8smaster3:/etc/kubernetes/pki/etcd/
scp /etc/kubernetes/pki/front-proxy-ca.* k8smaster3:/etc/kubernetes/pki/
kubeadm token create --print-join-command
kubeadm join 192.168.40.199:16443 --token x5kijl.pmkdap8ucnxqibl1 --discovery-token-ca-cert-hash sha256:4bf1de99aa3ef5753ad137c464b860884c4f7de912c451114b6acb7ec52812ec --control-plane --ignore-preflight-errors=SystemVerification
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
