Building a Highly Available Kubernetes Cluster
Deploy a binary HA k8s cluster, host your own Harbor image registry, and expose in-cluster services through Ingress.
Server preparation
Base environment
System setup covers hostnames, IP addresses, and OS tuning (iptables, firewall, kernel parameters, resource limits), plus the supporting load balancers and the Harbor deployment.
The servers can be virtual or physical machines in a private cloud, or VMs in a public cloud. In a company-hosted IDC, Harbor and the worker nodes can run directly on physical machines, while the masters, etcd, and the load balancers can be virtual machines.
| Role        | Server IP     | Hostname           | VIP          |
|-------------|---------------|--------------------|--------------|
| k8s-master1 | 172.20.22.24  | master1.magedu.net | 172.20.22.20 |
| k8s-master2 | 172.20.22.25  | master2.magedu.net | 172.20.22.20 |
| k8s-master3 | 172.20.22.26  | master3.magedu.net | 172.20.22.20 |
| harbor1     | 172.20.22.170 | harbor.magedu.net  |              |
| etcd1       | 172.20.22.24  | etcd1.magedu.net   |              |
| etcd2       | 172.20.22.25  | etcd2.magedu.net   |              |
| etcd3       | 172.20.22.26  | etcd3.magedu.net   |              |
| haproxy1    | 172.20.23.157 | ha1.magedu.net     |              |
| haproxy2    | 172.20.23.246 | ha2.magedu.net     |              |
| node1       | 172.20.22.27  | node1.magedu.net   |              |
| node2       | 172.20.22.28  | node2.magedu.net   |              |
| node3       | 172.20.22.30  | node3.magedu.net   |              |
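Before anything else, set the hostname on every machine and optionally give all hosts name resolution for each other. A minimal sketch using the names and IPs from the table above (run the hostnamectl line on each machine with its own name):
# hostnamectl set-hostname master1.magedu.net
# cat >> /etc/hosts <<'EOF'
172.20.22.24  master1.magedu.net etcd1.magedu.net
172.20.22.25  master2.magedu.net etcd2.magedu.net
172.20.22.26  master3.magedu.net etcd3.magedu.net
172.20.22.27  node1.magedu.net
172.20.22.28  node2.magedu.net
172.20.22.30  node3.magedu.net
172.20.22.170 harbor.magedu.net
EOF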
HA load balancer (keepalived + haproxy)
#######ha1
# yum install -y keepalived haproxy
# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    notification_email {
        acassen
    }
    notification_email_from Alexandre.Cassen@firewall.loc
    smtp_server 192.168.200.1
    smtp_connect_timeout 30
    router_id LVS_DEVEL
}
vrrp_instance VI_1 {
    state MASTER
    interface em1
    virtual_router_id 68
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 123456
    }
    virtual_ipaddress {
        172.20.22.20/16 dev em1 label em1:1
    }
}
# systemctl start keepalived
# vim /etc/sysctl.conf
# allow haproxy to bind the VIP even when this node does not currently hold it
net.ipv4.ip_nonlocal_bind = 1
# sysctl -p
# vim /etc/haproxy/haproxy.cfg
.....
## Add the following section (the backends are the three kube-apiservers on the masters)
listen master-6443
    bind 172.20.22.20:6443
    mode tcp
    balance roundrobin
    server 172.20.22.24 172.20.22.24:6443 check inter 3000 fall 3 rise 5
    server 172.20.22.25 172.20.22.25:6443 check inter 3000 fall 3 rise 5
    server 172.20.22.26 172.20.22.26:6443 check inter 3000 fall 3 rise 5
# systemctl start haproxy
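A quick sanity check on ha1 (a sketch; em1 is the interface used in the config above):
# ip addr show em1 | grep 172.20.22.20    # the VIP should be bound on the MASTER
# ss -tnlp | grep 6443                    # haproxy should be listening on the VIP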
####### ha2
# yum install -y keepalived haproxy
# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    notification_email {
        acassen
    }
    notification_email_from Alexandre.Cassen@firewall.loc
    smtp_server 192.168.200.1
    smtp_connect_timeout 30
    router_id LVS_DEVEL
}
vrrp_instance VI_1 {
    state BACKUP
    interface em1
    virtual_router_id 68
    priority 80
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 123456
    }
    virtual_ipaddress {
        172.20.22.20/16 dev em1 label em1:1
    }
}
# systemctl start keepalived
# vim /etc/sysctl.conf
net.ipv4.ip_nonlocal_bind = 1
# sysctl -p
# vim /etc/haproxy/haproxy.cfg
.....
## Add the following section (identical to ha1)
listen master-6443
    bind 172.20.22.20:6443
    mode tcp
    balance roundrobin
    server 172.20.22.24 172.20.22.24:6443 check inter 3000 fall 3 rise 5
    server 172.20.22.25 172.20.22.25:6443 check inter 3000 fall 3 rise 5
    server 172.20.22.26 172.20.22.26:6443 check inter 3000 fall 3 rise 5
# systemctl start haproxy
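To verify failover (a sketch), stop keepalived on ha1, watch the VIP move to ha2, then restore it:
####### on ha1
# systemctl stop keepalived
####### on ha2
# ip addr show em1 | grep 172.20.22.20    # the VIP should now be bound here
####### back on ha1
# systemctl start keepalived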
Deploying the k8s cluster with ansible (kubeasz)
# Install ansible on the deploy node
root@master01:~# apt install python3-pip git
root@master01:~# pip3 install ansible -i https://mirrors.aliyun.com/pypi/simple/
# Generate a key pair
root@master01:~# ssh-keygen -t rsa -P ''
root@master01:~# apt install -y sshpass
# Distribute the public key to all cluster nodes
root@master01:~# ssh-copy-id 172.20.22.24
root@master01:~# ssh-copy-id 172.20.22.25
root@master01:~# ssh-copy-id 172.20.22.26
root@master01:~# ssh-copy-id 172.20.22.27
root@master01:~# ssh-copy-id 172.20.22.28
root@master01:~# ssh-copy-id 172.20.22.30
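With sshpass installed, the key distribution can also be scripted (a sketch; replace YOUR_ROOT_PASSWORD with the actual root password):
root@master01:~# for ip in 172.20.22.24 172.20.22.25 172.20.22.26 172.20.22.27 172.20.22.28 172.20.22.30; do
>   sshpass -p 'YOUR_ROOT_PASSWORD' ssh-copy-id -o StrictHostKeyChecking=no root@${ip}
> done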
# Download the kubeasz project and its components
root@master01:~# export release=3.1.0
root@master01:~# curl -C- -fLO --retry 3 https://github.com/easzlab/kubeasz/releases/download/${release}/ezdown
root@master01:~# chmod +x ezdown
root@master01:~# ./ezdown -D
root@master01:~# ls -lrt /etc/kubeasz/down/
total 1245996
-rw-r--r-- 1 root root  69158342 May 16 06:43 docker-20.10.5.tgz
-rw------- 1 root root 451969024 May 16 06:50 calico_v3.15.3.tar
-rw------- 1 root root  42592768 May 16 06:51 coredns_1.8.0.tar
-rw------- 1 root root 124833792 May 16 06:52 k8s-dns-node-cache_1.17.0.tar
-rw------- 1 root root 227933696 May 16 06:53 dashboard_v2.2.0.tar
-rw------- 1 root root  58150912 May 16 06:53 flannel_v0.13.0-amd64.tar
-rw------- 1 root root  34566656 May 16 06:54 metrics-scraper_v1.0.6.tar
-rw------- 1 root root  41199616 May 16 06:54 metrics-server_v0.3.6.tar
-rw------- 1 root root    692736 May 16 06:54 pause_3.4.1.tar
-rw------- 1 root root    692736 May 16 06:54 pause.tar
-rw------- 1 root root  45063680 May 16 06:55 nfs-provisioner_v4.0.1.tar
-rw------- 1 root root 179014144 May 16 06:55 kubeasz_3.1.0.tar
# Create the cluster definition
root@master01:~# cd /etc/kubeasz/
root@master01:/etc/kubeasz# ./ezctl new k8s-01
# Edit the hosts file and adjust the settings below
root@master01:~# cd /etc/kubeasz/clusters/k8s-01/
root@master01:/etc/kubeasz/clusters/k8s-01# cat hosts
# 'etcd' cluster should have an odd number of members (1,3,5,...): the etcd nodes
[etcd]
172.20.22.24
172.20.22.25
172.20.22.26
# master node(s)
[kube_master]
172.20.22.24
172.20.22.25
172.20.22.26
# worker node(s)
[kube_node]
172.20.22.27
172.20.22.28
172.20.22.30
....
# [optional] loadbalancer for accessing k8s from outside; set the VIP address here
[ex_lb]
172.20.23.246 LB_ROLE=backup EX_APISERVER_VIP=172.20.22.20 EX_APISERVER_PORT=6443
172.20.23.157 LB_ROLE=master EX_APISERVER_VIP=172.20.22.20 EX_APISERVER_PORT=6443
.....
# Choose the network plugin
# Network plugins supported: calico, flannel, kube-router, cilium, kube-ovn
CLUSTER_NETWORK="calico"
# Service proxy mode of kube-proxy: 'iptables' or 'ipvs'
PROXY_MODE="ipvs"
# K8S Service CIDR, not overlap with node(host) networking
SERVICE_CIDR="10.68.0.0/16"
# Adjust the CIDRs below; they must not conflict with any existing network
# Cluster CIDR (Pod CIDR), not overlap with node(host) networking
CLUSTER_CIDR="172.28.0.0/16"
....
### Deploy the cluster
root@master01:/etc/kubeasz# ./ezctl help setup
Usage: ezctl setup <cluster> <step>
available steps:
    01  prepare            to prepare CA/certs & kubeconfig & other system settings
    02  etcd               to setup the etcd cluster
    03  container-runtime  to setup the container runtime(docker or containerd)
    04  kube-master        to setup the master nodes
    05  kube-node          to setup the worker nodes
    06  network            to setup the network plugin
    07  cluster-addon      to setup other useful plugins
    90  all                to run 01~07 all at once
    10  ex-lb              to install external loadbalance for accessing k8s from outside
    11  harbor             to install a new harbor server or to integrate with an existed one
examples: ./ezctl setup test-k8s 01  (or ./ezctl setup test-k8s prepare)
          ./ezctl setup test-k8s 02  (or ./ezctl setup test-k8s etcd)
          ./ezctl setup test-k8s all
          ./ezctl setup test-k8s 04 -t restart_master
root@master01:/etc/kubeasz# ./ezctl setup k8s-01 all
### When finished, check the cluster status
root@master01:/etc/kubeasz# kubectl get node
NAME           STATUS                     ROLES    AGE   VERSION
172.20.22.24   Ready,SchedulingDisabled   master   10s   v1.21.0
172.20.22.25   Ready,SchedulingDisabled   master   10s   v1.21.0
172.20.22.26   Ready,SchedulingDisabled   master   10s   v1.21.0
172.20.22.27   Ready                      node     10s   v1.21.0
172.20.22.28   Ready                      node     10s   v1.21.0
172.20.22.30   Ready                      node     10s   v1.21.0
root@master01:~# calicoctl node status
Calico process is running.
IPv4 BGP status
+--------------+-------------------+-------+----------+-------------+
| PEER ADDRESS | PEER TYPE | STATE | SINCE | INFO |
+--------------+-------------------+-------+----------+-------------+
| 172.20.22.26 | node-to-node mesh | up | 13:44:22 | Established |
| 172.20.22.24 | node-to-node mesh | up | 13:43:22 | Established |
| 172.20.22.27 | node-to-node mesh | up | 13:43:22 | Established |
| 172.20.22.28 | node-to-node mesh | up | 13:43:22 | Established |
| 172.20.22.30 | node-to-node mesh | up | 13:43:22 | Established |
+--------------+-------------------+-------+----------+-------------+
IPv6 BGP status
No IPv6 peers found.
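A quick cross-node pod network test (a sketch): start two throwaway pods, note which nodes they land on, and ping one from the other (substitute the actual pod IP reported by -o wide):
root@master01:~# kubectl run net-test1 --image=alpine -- sleep 360000
root@master01:~# kubectl run net-test2 --image=alpine -- sleep 360000
root@master01:~# kubectl get pod -o wide
root@master01:~# kubectl exec net-test1 -- ping -c 2 <pod-IP-of-net-test2>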
Deploying Harbor
Internal images will be stored centrally on the in-house Harbor server.
[root@node4 src]# ls -lrt
total 493016
-rw-r--r--. 1 root root 504847710 May 22  2021 harbor-offline-installer-v2.2.2.tgz
[root@node4 src]# tar xf harbor-offline-installer-v2.2.2.tgz
[root@node4 src]# ls -lrth
total 482M
-rw-r--r--. 1 root root 482M May 22  2021 harbor-offline-installer-v2.2.2.tgz
drwxr-xr-x. 2 root root  122 May 14 16:37 harbor
[root@node4 harbor]# mkdir certs
[root@node4 harbor]# openssl genrsa -out certs/harbor-ca.key
Generating RSA private key, 2048 bit long modulus
..............+++
.+++
e is 65537 (0x10001)
[root@node4 harbor]# openssl req -new -x509 -nodes -key certs/harbor-ca.key -subj "/CN=harbor.magedu.net" -days 7120 -out certs/harbor-ca.crt
[root@node4 harbor]# ls -rtl certs/
total 8
-rw-r--r--. 1 root root 1675 May 14 16:39 harbor-ca.key
-rw-r--r--. 1 root root 1115 May 14 16:40 harbor-ca.crt
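Note: Docker releases built with Go 1.15+ reject certificates that carry only a CN. If docker login later fails with an x509 subjectAltName error, regenerate the certificate with a SAN (requires OpenSSL 1.1.1+), a sketch:
[root@node4 harbor]# openssl req -new -x509 -nodes -key certs/harbor-ca.key \
    -subj "/CN=harbor.magedu.net" \
    -addext "subjectAltName=DNS:harbor.magedu.net" \
    -days 7120 -out certs/harbor-ca.crt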
[root@node4 harbor]# cp harbor.yml.tmpl harbor.yml
[root@node4 harbor]# grep -v '#' harbor.yml | grep '^[^$]'
hostname: harbor.magedu.net
http:
port: 80
https:
port: 443
certificate: /usr/local/src/harbor/certs/harbor-ca.crt
private_key: /usr/local/src/harbor/certs/harbor-ca.key
harbor_admin_password: 123456
[root@node4 harbor]# ./install.sh --help
[root@node4 harbor]# ./install.sh --with-trivy
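After the installer finishes, check that every Harbor component is up (run from the harbor directory), then log in to https://harbor.magedu.net as admin and create the projects that images will be pushed to:
[root@node4 harbor]# docker-compose ps    # all components should show Up (healthy)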
Logging the nodes into Harbor
Every node needs a manual hosts entry for harbor.magedu.net and a copy of the CA certificate so that docker trusts the registry:
root@node01:~# mkdir /etc/docker/certs.d/harbor.magedu.net -p
[root@node4 harbor]# scp certs/harbor-ca.crt root@172.20.22.27:/etc/docker/certs.d/harbor.magedu.net
root@node01:~# vim /etc/hosts
......
172.20.22.170 harbor.magedu.net
root@node01:~# systemctl restart docker
root@node01:~# docker login harbor.magedu.net
Username: admin
Password:
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store
Login Succeeded
# Test pushing an image (the target project, here images, must already exist in Harbor)
root@node01:~# docker tag alpine:latest harbor.magedu.net/images/alpine:latest
root@node01:~# docker push harbor.magedu.net/images/alpine:latest
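Optionally confirm from a second node (after the same certificate and hosts setup; the node02 hostname here is illustrative) that the image can be pulled back:
root@node02:~# docker pull harbor.magedu.net/images/alpine:latest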
Ingress
Ingress provides the entry point for services inside a Kubernetes cluster, offering load balancing, SSL termination, and name-based virtual hosting. Ingress controllers commonly used in production include Traefik, Nginx, HAProxy, and Istio.
Deploying ingress-nginx
Official docs: https://kubernetes.io/docs/concepts/services-networking/ingress/
GitHub: https://github.com/kubernetes/ingress-nginx (NGINX Ingress Controller for Kubernetes)
root@master01:~# wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.0.0/deploy/static/provider/baremetal/deploy.yaml
# k8s.gcr.io is often unreachable; replace the k8s.gcr.io image addresses in deploy.yaml
# with the Aliyun mirrors, and optionally pre-pull them:
root@master01:~# docker pull registry.aliyuncs.com/google_containers/nginx-ingress-controller:v1.0.0
root@master01:~# docker pull registry.aliyuncs.com/google_containers/kube-webhook-certgen:v1.1.0
root@master01:~# kubectl apply -f deploy.yaml
root@master01:~# kubectl get pod -n ingress-nginx
NAME                                        READY   STATUS      RESTARTS   AGE
ingress-nginx-admission-create-qrtwr        0/1     Completed   0          115s
ingress-nginx-admission-patch-ksvvp         0/1     Completed   2          115s
ingress-nginx-controller-7fdf4d55c7-c5vxz   1/1     Running     0          116s
root@master01:~# kubectl get svc -n ingress-nginx
NAME                                 TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                      AGE
ingress-nginx-controller             NodePort    10.68.20.240    <none>        80:30080/TCP,443:30443/TCP   3m7s
ingress-nginx-controller-admission   ClusterIP   10.68.197.250   <none>        443/TCP                      3m7s
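With no Ingress rules defined yet, the controller's NodePorts answer with a 404 from the default backend, which makes for a quick liveness check (a sketch):
root@master01:~# curl -I http://172.20.22.27:30080/    # expect HTTP/1.1 404 Not Found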
If applying an Ingress manifest later fails with: error when creating "ingress-myapp.yaml": Internal error occurred: failed calling webhook..., a common workaround is to delete the admission webhook configuration (note that this disables Ingress manifest validation):
root@master01:~# kubectl get ValidatingWebhookConfiguration
NAME                      WEBHOOKS   AGE
ingress-nginx-admission   1          4m4s
# Delete ingress-nginx-admission
root@master01:~# kubectl delete ValidatingWebhookConfiguration ingress-nginx-admission
Ingress Rules
host: optional; usually set to the domain name the rule serves.
path: each path maps to a backend service name and port; both the host and the path are matched against the incoming request before traffic reaches the service.
backend: the combination of a Service and a port. HTTP and HTTPS requests that match the rule's host and path are sent to this backend. All three fields appear in the example manifest below.
Name-based virtual hosting via Ingress
Deploying tomcat
Prerequisite: build a custom tomcat image and push it to the harbor registry; for the build steps, see the earlier write-up on creating images with a Dockerfile.
# cat tomcat-app1.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
  name: magedu-tomcat-app1-deployment
  namespace: webapp
spec:
  replicas: 1
  selector:
    matchLabels:
      app: magedu-tomcat-app1
  template:
    metadata:
      labels:
        app: magedu-tomcat-app1
    spec:
      containers:
      - name: magedu-tomcat-app1-container
        image: harbor.magedu.net/linux/tomcat:app1
        #command: ["/apps/tomcat/bin/run_tomcat.sh"]
        #imagePullPolicy: IfNotPresent
        imagePullPolicy: Always
        ports:
        - containerPort: 8080
          protocol: TCP
          name: http
        resources:
          limits:
            cpu: 1
            memory: "512Mi"
          requests:
            cpu: 500m
            memory: "512Mi"
---
kind: Service
apiVersion: v1
metadata:
  name: magedu-tomcat-app1-service
  namespace: webapp
spec:
  type: ClusterIP
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 8080
  selector:
    app: magedu-tomcat-app1
# kubectl create namespace webapp    # create the namespace first if it does not exist
# kubectl apply -f tomcat-app1.yaml
# kubectl get pod -n webapp
NAME                                             READY   STATUS    RESTARTS   AGE
magedu-tomcat-app1-deployment-68994cd7dd-fp5s5   1/1     Running   0          5m
# kubectl get svc -n webapp
NAME                         TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)   AGE
magedu-tomcat-app1-service   ClusterIP   10.68.18.88   <none>        80/TCP    12s
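A quick in-cluster check (a sketch) that the service answers before wiring up Ingress; the /app1/ path matches the test page used later on:
# kubectl run -n webapp curl-test --rm -it --image=alpine -- sh
/ # wget -qO- http://magedu-tomcat-app1-service/app1/
tomcat APP1 web page.
/ # exit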
# cat tomcat-app2.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
  name: magedu-tomcat-app2-deployment
  namespace: webapp
spec:
  replicas: 1
  selector:
    matchLabels:
      app: magedu-tomcat-app2
  template:
    metadata:
      labels:
        app: magedu-tomcat-app2
    spec:
      containers:
      - name: magedu-tomcat-app2-container
        image: harbor.magedu.net/linux/tomcat:app2
        #command: ["/apps/tomcat/bin/run_tomcat.sh"]
        #imagePullPolicy: IfNotPresent
        imagePullPolicy: Always
        ports:
        - containerPort: 8080
          protocol: TCP
          name: http
        resources:
          limits:
            cpu: 1
            memory: "512Mi"
          requests:
            cpu: 500m
            memory: "512Mi"
---
kind: Service
apiVersion: v1
metadata:
  name: magedu-tomcat-app2-service
  namespace: webapp
spec:
  type: ClusterIP
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 8080
  selector:
    app: magedu-tomcat-app2
# kubectl apply -f tomcat-app2.yaml
# kubectl get pod -n webapp
NAME                                             READY   STATUS    RESTARTS   AGE
magedu-tomcat-app1-deployment-68994cd7dd-fp5s5   1/1     Running   0          65s
magedu-tomcat-app2-deployment-6c96c8c7b-km45b    1/1     Running   0          20s
# kubectl get svc -n webapp
NAME                         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE
magedu-tomcat-app1-service   ClusterIP   10.68.18.88     <none>        80/TCP    12s
magedu-tomcat-app2-service   ClusterIP   10.68.175.162   <none>        80/TCP    17s
Create the matching Ingress rules
# cat ingress_multi-host.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ingress-nginx
  namespace: webapp
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  rules:
  - host: www.m63.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: magedu-tomcat-app1-service
            port:
              number: 80
  - host: mobile.m63.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: magedu-tomcat-app2-service
            port:
              number: 80
# kubectl apply -f ingress_multi-host.yaml
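Confirm the rules were admitted (a sketch):
# kubectl get ingress -n webapp          # both www.m63.com and mobile.m63.com should be listed
# kubectl describe ingress ingress-nginx -n webapp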
#### Test through the front-end haproxy: add the corresponding configuration
# vim /etc/haproxy/haproxy.cfg
....
listen webapp-80
    bind 172.20.22.188:80
    mode tcp
    balance roundrobin
    server 172.20.22.27 172.20.22.27:30080 check inter 3000 fall 3 rise 5
    server 172.20.22.28 172.20.22.28:30080 check inter 3000 fall 3 rise 5
listen webapp-443
    bind 172.20.22.188:443
    mode tcp
    balance roundrobin
    server 172.20.22.27 172.20.22.27:30443 check inter 3000 fall 3 rise 5
    server 172.20.22.28 172.20.22.28:30443 check inter 3000 fall 3 rise 5
# systemctl reload haproxy
#### Add local hosts entries and test
# cat /etc/hosts
172.20.22.188 www.m63.com mobile.m63.com
# curl http://www.m63.com/app1/
tomcat APP1 web page.
# curl http://mobile.m63.com/app2/
tomcat APP2 web page.
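The rules can also be exercised without the front-end haproxy by sending the Host header straight to a node's NodePort (a sketch):
# curl -H "Host: www.m63.com" http://172.20.22.27:30080/app1/
tomcat APP1 web page.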