Lesson 8: 尚硅谷 K8s Study Notes - High Availability for a k8s Cluster
tags:
- golang
- 2019尚硅谷
categories:
- K8s
- High-availability cluster
Section 1: High Availability for the Cluster
1.1 Understanding the Cluster
- To keep the cluster usable when a Master node goes offline, it can be retrofitted with a high-availability scheme.
- The ApiServer is reached through an Haproxy or Nginx reverse proxy (communication stays over HTTPS); a minimal haproxy sketch follows this list.
- An official kubeadm high-availability scheme only exists since Kubernetes 1.14; versions before 1.14 had none.
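A minimal haproxy.cfg sketch of the idea, using the addresses from this lesson (the config actually used later ships inside start.keep.tar.gz, see 1.4): a TCP frontend on port 6444 forwards to the three apiservers on 6443.
frontend k8s-api
    bind *:6444
    mode tcp
    default_backend k8s-api-backend
backend k8s-api-backend
    mode tcp
    balance roundrobin
    server master01 192.168.1.10:6443 check
    server master02 192.168.1.20:6443 check
    server master03 192.168.1.21:6443 check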
1.2 Cluster Build: System Environment Initialization
# Set the hostname
hostnamectl set-hostname k8s-master01
# Make the machines able to resolve each other: add the following entries to
vi /etc/hosts
192.168.1.10 k8s-master01
192.168.1.20 k8s-master02
192.168.1.21 k8s-master03
192.168.1.100 k8s-vip
# Copy the hosts file to the other VMs
scp /etc/hosts root@k8s-master02:/etc/hosts
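The same copy can be looped over all remaining nodes; a small sketch using the host names configured above:
for h in k8s-master02 k8s-master03; do
  scp /etc/hosts root@$h:/etc/hosts
done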
# Install dependency packages
yum install -y conntrack ntpdate ntp ipvsadm ipset jq iptables curl sysstat libseccomp wget vim net-tools git
# Turn off swap
swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
# Disable SELinux
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
# Tune kernel parameters for Kubernetes
cat > /etc/sysctl.d/kubernetes.conf << EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.ip_nonlocal_bind = 1
net.ipv4.tcp_tw_recycle=0
# Forbid using swap space; allow it only when the system is OOM
vm.swappiness=0
# Do not check whether physical memory is sufficient
vm.overcommit_memory=1
# Do not panic on OOM; let the OOM killer handle it
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
sysctl -p /etc/sysctl.d/kubernetes.conf
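If sysctl reports "No such file or directory" for the net.bridge.* keys, the br_netfilter module is not loaded yet (it is loaded explicitly in 1.3); a quick fix-and-verify sketch:
modprobe br_netfilter
sysctl -p /etc/sysctl.d/kubernetes.conf
# Spot-check that the values took effect
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward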
# Adjust the system time zone
# Set the system time zone to Asia/Shanghai
timedatectl set-timezone Asia/Shanghai
# Write the current UTC time to the hardware clock
timedatectl set-local-rtc 0
# Restart services that depend on the system time
systemctl restart rsyslog
systemctl restart crond
# Stop services that are not needed
systemctl stop postfix && systemctl disable postfix
# Configure rsyslogd and systemd journald
# Create the directory for persistent log storage
mkdir /var/log/journal
mkdir /etc/systemd/journald.conf.d
cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
[Journal]
# Persist logs to disk
Storage=persistent
# Compress historical logs
Compress=yes
SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000
# Maximum disk usage: 10G
SystemMaxUse=10G
# Maximum size per log file: 200M
SystemMaxFileSize=200M
# Keep logs for 2 weeks
MaxRetentionSec=2week
# Do not forward logs to syslog
ForwardToSyslog=no
EOF
systemctl restart systemd-journald
# Upgrade the system kernel to 4.4.x (optional)
# The stock 3.10.x kernel on CentOS 7.x has bugs that make Docker and Kubernetes unstable. Check the current kernel with uname -r, then upgrade as follows:
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
# After installation, check that the kernel's menuentry in /boot/grub2/grub.cfg contains an initrd16 entry; if not, install again!
yum --enablerepo=elrepo-kernel install -y kernel-lt
# Boot from the new kernel by default
grub2-set-default "CentOS Linux (4.4.182-1.el7.elrepo.x86_64) 7 (Core)"
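The title passed to grub2-set-default must exactly match an existing menuentry; a sketch for listing the titles present on this machine:
awk -F\' '$1=="menuentry " {print $2}' /boot/grub2/grub.cfg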
# After rebooting, confirm the kernel version changed
reboot
uname -r
sudo setenforce 0
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
# Bridged traffic must pass through iptables, and the firewall is disabled further below; without this, joining nodes later may fail with errors such as: couldn't validate the identity of the API Server
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward=1
EOF
sudo sysctl --system
# Disable NUMA: append numa=off to GRUB_CMDLINE_LINUX
cp /etc/default/grub{,.bak}
vim /etc/default/grub
GRUB_CMDLINE_LINUX="crashkernel=auto spectre_v2=retpoline rd.lvm.lv=centos/root rd.lvm.lv=centos/swap rhgb quiet numa=off"
cp /boot/grub2/grub.cfg{,.bak}
grub2-mkconfig -o /boot/grub2/grub.cfg
# Stop and disable the firewall
sudo systemctl stop firewalld
sudo systemctl disable firewalld
# Turn off swap and strip it from /etc/fstab (keeping a backup)
sudo swapoff -a
yes | cp /etc/fstab /etc/fstab_bak
cat /etc/fstab_bak | grep -v swap > /etc/fstab
1.3 Kubeadm Deployment on the VMs
- Prerequisites for enabling IPVS in kube-proxy
# IPVS has been merged into the mainline kernel, so enabling it for kube-proxy only requires loading the following kernel modules
modprobe br_netfilter
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
# The script written to /etc/sysconfig/modules/ipvs.modules ensures the required modules are reloaded automatically after a node reboot. Use lsmod | grep -e ip_vs -e nf_conntrack_ipv4 to confirm they loaded correctly.
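Once the cluster is running (after 1.6), IPVS mode can be confirmed with ipvsadm, installed among the dependencies in 1.2; it should list virtual servers for the Service addresses:
ipvsadm -Ln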
- Install Docker
# Install dependencies
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
# Add the Aliyun docker-ce repository
sudo yum-config-manager \
--add-repo \
http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Install Docker (docker-ce 19.03)
yum update -y && yum install -y docker-ce
# Create the /etc/docker directory
mkdir /etc/docker
# Set the daemon's cgroup driver to systemd (Docker ships two cgroup drivers); use the json-file log driver with a 100 MB cap per file
cat > /etc/docker/daemon.json <<EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
},
"registry-mirrors": ["https://0stg88gt.mirror.aliyuncs.com"]
}
EOF
# Create a drop-in directory for docker service configuration
mkdir -p /etc/systemd/system/docker.service.d
# Restart the docker service
systemctl daemon-reload && systemctl restart docker && systemctl enable docker
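A quick check that daemon.json took effect; the output should read Cgroup Driver: systemd:
docker info | grep -i "cgroup driver"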
1.4 Starting the Haproxy and Keepalived Containers on the Master Nodes
Import the images > run the scripts > check the available nodes.
Repeat on the other master nodes.
docker load -i haproxy.tar
docker load -i keepalived.tar
# Adjust the image directory in the script, then batch-import
tar -zvxf kubeadm-basic.images.tar.gz
sh load-images.sh
# On the master node
tar -zvxf start.keep.tar.gz
mv data / && cd /data/lb
# Edit the haproxy config: keep only one backend server for now, so requests are not balanced to masters that are not up yet (which would fail)
vim etc/haproxy.cfg
server rancher01 192.168.1.10:6443
# server rancher02 192.168.1.20:6443
# server rancher03 192.168.1.21:6443
# Edit the deployment script start-haproxy.sh
vi start-haproxy.sh
MasterIP1=192.168.1.10
MasterIP2=192.168.1.20
MasterIP3=192.168.1.21
sh start-haproxy.sh
netstat -anpt | grep :6444
# Edit the deployment script start-keepalived.sh
vim start-keepalived.sh
VIRTUAL_IP=192.168.1.100
INTERFACE=ens33
sh start-keepalived.sh
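To verify keepalived works, check that the VIP landed on whichever master won the election (interface name as set above); the grep pattern assumes the container names contain haproxy/keepalived:
ip addr show ens33 | grep 192.168.1.100
docker ps | grep -iE 'haproxy|keepalived'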
1.5 Installing Kubeadm
# Configure the Kubernetes yum repository
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# Install kubeadm, kubectl, and kubelet
sudo yum install -y kubelet-1.15.1 kubeadm-1.15.1 kubectl-1.15.1
sudo systemctl restart kubelet
sudo systemctl enable kubelet
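Until kubeadm init runs, kubelet restarts in a loop waiting for its configuration; that is expected here. A quick sanity check of the installed versions:
kubeadm version -o short
kubelet --version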
1.6 Initializing the First Master Node
# Generate the default init template
kubeadm config print init-defaults > kubeadm-config.yaml
# Edit the template as follows
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.1.10    # note: this master's own IP
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master01                # note: this node's hostname
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "192.168.1.100:6444"    # note: the VIP plus the haproxy port
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
kubernetesVersion: v1.15.1    # note: must match the installed version
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16    # note: must match the flannel pod network
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
# note: append this block to switch kube-proxy to IPVS mode
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs
# Pull the images from the Aliyun mirror and retag them to k8s.gcr.io (a loop sketch follows the list)
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.15.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.15.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.15.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.15.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.3.10
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.3.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.15.1 k8s.gcr.io/kube-apiserver:v1.15.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.15.1 k8s.gcr.io/kube-controller-manager:v1.15.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.15.1 k8s.gcr.io/kube-scheduler:v1.15.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.15.1 k8s.gcr.io/kube-proxy:v1.15.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1 k8s.gcr.io/pause:3.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.3.10 k8s.gcr.io/etcd:3.3.10
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.3.1 k8s.gcr.io/coredns:1.3.1
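The pull-and-retag list above can be compressed into a loop; an equivalent sketch:
MIRROR=registry.cn-hangzhou.aliyuncs.com/google_containers
for img in kube-apiserver:v1.15.1 kube-controller-manager:v1.15.1 \
           kube-scheduler:v1.15.1 kube-proxy:v1.15.1 \
           pause:3.1 etcd:3.3.10 coredns:1.3.1; do
  docker pull $MIRROR/$img
  docker tag $MIRROR/$img k8s.gcr.io/$img
done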
# Initialize the node from the template
kubeadm init --config=kubeadm-config.yaml --upload-certs | tee kubeadm-init.log
# Configure kubectl access
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# The init output ends with two join commands; the first one adds another control-plane (master) node
kubeadm join 192.168.1.100:6444 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:b59a47effc8f6881d73109ae8e00dc71c3dc1ccd0367438130fc2495de4fb938 \
    --control-plane --certificate-key 208aa794c5cba79f10232b03540f88d9b8778c6abe1b83380129161350ab1468
# The second one adds a worker node
kubeadm join 192.168.1.100:6444 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:b59a47effc8f6881d73109ae8e00dc71c3dc1ccd0367438130fc2495de4fb938
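The token above expires after the 24h ttl set in kubeadm-config.yaml. A sketch for minting fresh join credentials later (both subcommands exist in kubeadm 1.15):
# Print a fresh worker join command
kubeadm token create --print-join-command
# Mint a new certificate key for joining additional control-plane nodes
kubeadm init phase upload-certs --upload-certs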
# Install the flannel network plugin
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# lrzsz enables pulling/pushing files from within Xshell (rz/sz)
yum -y install lrzsz
# If pulling the flannel image fails, pull it from a mirror and retag
docker pull registry.cn-shanghai.aliyuncs.com/yijindami/flannel:v0.12.0-amd64
docker tag registry.cn-shanghai.aliyuncs.com/yijindami/flannel:v0.12.0-amd64 quay.io/coreos/flannel:v0.12.0-amd64
# Or load it from a local image archive (the amd64 archive for x86_64 hosts)
docker load < flanneld-v0.12.0-amd64.docker
sudo systemctl restart kubelet
kubectl get nodes
kubectl get pod --all-namespaces
1.7 Adding the Other Master Nodes
- Start the Haproxy and Keepalived containers as in 1.4.
- Install kubelet-1.15.1, kubeadm-1.15.1, and kubectl-1.15.1.
- Join with the control-plane join command printed during init:
kubeadm join 192.168.1.100:6444 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:b59a47effc8f6881d73109ae8e00dc71c3dc1ccd0367438130fc2495de4fb938 \
--control-plane --certificate-key 208aa794c5cba79f10232b03540f88d9b8778c6abe1b83380129161350ab1468
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Shut down master01 (192.168.1.10); on master02 (192.168.1.20), kubectl get node hangs after a few attempts
kubectl get node
# Fix: in the kubeconfig, change the VIP (192.168.1.100) to this machine's own address
vim .kube/config
server: https://192.168.1.20:6444
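An equivalent one-liner sketch for the manual edit above, assuming you are on k8s-master02:
sed -i 's#https://192.168.1.100:6444#https://192.168.1.20:6444#' $HOME/.kube/config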
1.8 Checking the Etcd Cluster Status
# Check which controller-manager instance holds the leader lease
kubectl get endpoints kube-controller-manager --namespace=kube-system
# Check which scheduler instance holds the leader lease
kubectl get endpoints kube-scheduler --namespace=kube-system -o yaml
# Check the health of the etcd cluster
kubectl -n kube-system exec etcd-k8s-master01 -- etcdctl \
  --endpoints=https://192.168.1.10:2379 \
  --ca-file=/etc/kubernetes/pki/etcd/ca.crt \
  --cert-file=/etc/kubernetes/pki/etcd/server.crt \
  --key-file=/etc/kubernetes/pki/etcd/server.key \
  cluster-health
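The same TLS flags also work for listing the members and seeing which one is the leader (etcd 3.3 defaults to the v2 etcdctl API, where these flags apply); a sketch:
kubectl -n kube-system exec etcd-k8s-master01 -- etcdctl \
  --endpoints=https://192.168.1.10:2379 \
  --ca-file=/etc/kubernetes/pki/etcd/ca.crt \
  --cert-file=/etc/kubernetes/pki/etcd/server.crt \
  --key-file=/etc/kubernetes/pki/etcd/server.key \
  member list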