Deploying Kubernetes v1.20 from Binaries (Final Edition)
Cluster plan
k8s master01: 20.0.0.101  kube-apiserver kube-controller-manager kube-scheduler etcd
k8s master02: 20.0.0.102
k8s node01:   20.0.0.103  kubelet kube-proxy docker
k8s node02:   20.0.0.104
etcd node 1:  20.0.0.101  etcd
etcd node 2:  20.0.0.103
etcd node 3:  20.0.0.104
Load balancer nginx+keepalived 01 (master): 20.0.0.105
Load balancer nginx+keepalived 02 (backup): 20.0.0.106
VIP: 20.0.0.100
Perform the following on all three hosts (master01, node01, node02)
Operating system initialization
#Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
#Disable SELinux
setenforce 0
sed -i 's/enforcing/disabled/' /etc/selinux/config
#Disable swap (turning off the swap partition improves performance)
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
#Set the hostnames according to the plan
hostnamectl set-hostname master01
hostnamectl set-hostname node01
hostnamectl set-hostname node02
#Add hosts entries on all three hosts
cat >> /etc/hosts << EOF
20.0.0.101 master01
20.0.0.103 node01
20.0.0.104 node02
EOF
#Tune kernel parameters
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv6.conf.all.disable_ipv6=1
net.ipv4.ip_forward=1
EOF
sysctl --system
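#Note: the net.bridge.bridge-nf-call-* keys only exist once the br_netfilter kernel module is loaded. A minimal sketch of loading it explicitly (an optional extra step, assuming a stock CentOS 7 kernel where it is not loaded automatically):
modprobe br_netfilter
cat > /etc/modules-load.d/br_netfilter.conf << EOF
br_netfilter
EOF
sysctl --system    #re-apply so the bridge-nf-call-* keys resolve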
#Time synchronization
yum install ntpdate -y
ntpdate ntp.aliyun.com
Deploy the Docker engine
//Deploy the Docker engine on all worker nodes
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install -y docker-ce docker-ce-cli containerd.io
systemctl start docker.service
systemctl enable docker.service
Deploy the etcd cluster
The IP addresses in the certificates must be changed to match your environment
//On the master01 node
#Prepare the cfssl certificate-generation tools
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -O /usr/local/bin/cfssl
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -O /usr/local/bin/cfssljson
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -O /usr/local/bin/cfssl-certinfo
chmod +x /usr/local/bin/cfssl*
### Generate etcd certificates ###
mkdir /opt/k8s
cd /opt/k8s/
#Upload etcd-cert.sh and etcd.sh to /opt/k8s/
chmod +x etcd-cert.sh etcd.sh
#Create a directory for generating the CA certificate, etcd server certificate, and private keys
mkdir /opt/k8s/etcd-cert
mv etcd-cert.sh etcd-cert/
cd /opt/k8s/etcd-cert/
./etcd-cert.sh #Generate the CA certificate, etcd server certificate, and private keys
ls
ca-config.json ca-csr.json ca.pem server.csr server-key.pem
ca.csr ca-key.pem etcd-cert.sh server-csr.json server.pem
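#etcd-cert.sh itself is not reproduced here. As a rough sketch (the file names and the "www" profile are illustrative assumptions, not the actual script), the part that must match your environment is the hosts list of the server CSR, which has to contain the three etcd node IPs, and the cfssl call that signs it:
cat > server-csr.json << EOF
{
  "CN": "etcd",
  "hosts": ["20.0.0.101", "20.0.0.103", "20.0.0.104"],
  "key": {"algo": "rsa", "size": 2048},
  "names": [{"C": "CN", "L": "BeiJing", "ST": "BeiJing"}]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server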
#Upload etcd-v3.4.9-linux-amd64.tar.gz to /opt/k8s and start the etcd service
https://github.com/etcd-io/etcd/releases/download/v3.4.9/etcd-v3.4.9-linux-amd64.tar.gz
cd /opt/k8s/
tar zxvf etcd-v3.4.9-linux-amd64.tar.gz
ls etcd-v3.4.9-linux-amd64
Documentation etcd etcdctl README-etcdctl.md README.md READMEv2-etcdctl.md
#Create directories for the etcd configuration files, binaries, and certificates
mkdir -p /opt/etcd/{cfg,bin,ssl}
cd /opt/k8s/etcd-v3.4.9-linux-amd64/
mv etcd etcdctl /opt/etcd/bin/
cp /opt/k8s/etcd-cert/*.pem /opt/etcd/ssl/
cd /opt/k8s/
./etcd.sh etcd01 20.0.0.101 etcd02=https://20.0.0.103:2380,etcd03=https://20.0.0.104:2380
#This command blocks while waiting for the other nodes to join. All three etcd services need to be started; if only this one is started, it will hang until every etcd node in the cluster is up. This can be ignored.
#Open another terminal to check whether the etcd process is running
ps -ef | grep etcd
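#etcd.sh is not listed here either. As a rough sketch (an assumption about what the script writes, not a verbatim copy), besides /opt/etcd/cfg/etcd it generates a systemd unit roughly like this:
cat > /usr/lib/systemd/system/etcd.service << 'EOF'
[Unit]
Description=Etcd Server
After=network.target

[Service]
Type=notify
EnvironmentFile=/opt/etcd/cfg/etcd
ExecStart=/opt/etcd/bin/etcd \
  --cert-file=/opt/etcd/ssl/server.pem \
  --key-file=/opt/etcd/ssl/server-key.pem \
  --trusted-ca-file=/opt/etcd/ssl/ca.pem \
  --peer-cert-file=/opt/etcd/ssl/server.pem \
  --peer-key-file=/opt/etcd/ssl/server-key.pem \
  --peer-trusted-ca-file=/opt/etcd/ssl/ca.pem
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF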
#Copy the etcd certificates, binaries, and service unit file to the other two etcd cluster nodes
scp -r /opt/etcd/ root@20.0.0.103:/opt/
scp -r /opt/etcd/ root@20.0.0.104:/opt/
scp /usr/lib/systemd/system/etcd.service root@20.0.0.103:/usr/lib/systemd/system/
scp /usr/lib/systemd/system/etcd.service root@20.0.0.104:/usr/lib/systemd/system/
//On the node01 node
vim /opt/etcd/cfg/etcd
#[Member]
ETCD_NAME="etcd02" #修改
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://20.0.0.103:2380" #modify
ETCD_LISTEN_CLIENT_URLS="https://20.0.0.103:2379" #modify
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://20.0.0.103:2380" #modify
ETCD_ADVERTISE_CLIENT_URLS="https://20.0.0.103:2379" #modify
ETCD_INITIAL_CLUSTER="etcd01=https://20.0.0.101:2380,etcd02=https://20.0.0.103:2380,etcd03=https://20.0.0.104:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
#Start the etcd service
systemctl start etcd
systemctl enable etcd
systemctl status etcd
//On the node02 node
vim /opt/etcd/cfg/etcd
#[Member]
ETCD_NAME="etcd03" #修改
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://20.0.0.104:2380" #modify
ETCD_LISTEN_CLIENT_URLS="https://20.0.0.104:2379" #modify
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://20.0.0.104:2380" #modify
ETCD_ADVERTISE_CLIENT_URLS="https://20.0.0.104:2379" #modify
ETCD_INITIAL_CLUSTER="etcd01=https://20.0.0.101:2380,etcd02=https://20.0.0.103:2380,etcd03=https://20.0.0.104:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
#Start the etcd service
systemctl start etcd
systemctl enable etcd
systemctl status etcd
#Check etcd cluster health
ETCDCTL_API=3 /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://20.0.0.101:2379,https://20.0.0.103:2379,https://20.0.0.104:2379" endpoint health --write-out=table
+-----------------------------+--------+-------------+-------+
| ENDPOINT | HEALTH | TOOK | ERROR |
+-----------------------------+--------+-------------+-------+
| https://20.0.0.104:2379 | true | 56.973943ms | |
| https://20.0.0.101:2379 | true | 68.752723ms | |
| https://20.0.0.103:2379 | true | 69.001616ms | |
+-----------------------------+--------+-------------+-------+
------------------------------------------------------------------------------------------
--cacert: CA certificate used to verify the TLS-enabled etcd servers
--cert: client TLS certificate file
--key: client TLS key file
--endpoints: comma-separated list of cluster member addresses
endpoint health: checks the health of the etcd cluster endpoints
------------------------------------------------------------------------------------------
#List the etcd cluster members
ETCDCTL_API=3 /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://20.0.0.101:2379,https://20.0.0.103:2379,https://20.0.0.104:2379" --write-out=table member list
+------------------+---------+--------+-----------------------------+-----------------------------+------------+
| ID | STATUS | NAME | PEER ADDRS | CLIENT ADDRS | IS LEARNER |
+------------------+---------+--------+-----------------------------+-----------------------------+------------+
| a38688e52fefd345 | started | etcd02 | https://20.0.0.103:2380 | https://20.0.0.103:2379 | false |
| cf4f326398a30bd2 | started | etcd03 | https://20.0.0.104:2380 | https://20.0.0.104:2379 | false |
| fc34ab8516b45b7b | started | etcd01 | https://20.0.0.101:2380 | https://20.0.0.101:2379 | false |
+------------------+---------+--------+-----------------------------+-----------------------------+------------+
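#Optional smoke test (not part of the original steps): writing and reading back a key confirms that quorum writes work end to end.
ETCDCTL_API=3 /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://20.0.0.101:2379" put /test/hello "world"
ETCDCTL_API=3 /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://20.0.0.103:2379" get /test/hello
ETCDCTL_API=3 /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://20.0.0.101:2379" del /test/hello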
------------------------------ Deploy the Master components ------------------------------
//On the master01 node
#Upload master.zip and k8s-cert.sh to /opt/k8s, then unzip master.zip
cd /opt/k8s/
unzip master.zip
chmod +x *.sh
#Create the kubernetes working directory
mkdir -p /opt/kubernetes/{bin,cfg,ssl,logs}
#Create a directory for generating the CA certificate and the component certificates and private keys
mkdir /opt/k8s/k8s-cert
mv /opt/k8s/k8s-cert.sh /opt/k8s/k8s-cert
cd /opt/k8s/k8s-cert/
./k8s-cert.sh #Generate the CA certificate and the component certificates and private keys
ls *pem
admin-key.pem apiserver-key.pem ca-key.pem kube-proxy-key.pem
admin.pem apiserver.pem ca.pem kube-proxy.pem
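#k8s-cert.sh is not reproduced here. The detail that matters (a sketch of the usual layout of such a script, not the actual file) is that the apiserver CSR's hosts list must contain every address clients will use to reach the apiserver: the kubernetes service cluster IP, 127.0.0.1, both master IPs, the VIP, and the load balancer IPs, roughly:
#Illustrative excerpt of apiserver-csr.json
{
  "CN": "kubernetes",
  "hosts": [
    "10.0.0.1",
    "127.0.0.1",
    "20.0.0.101",
    "20.0.0.102",
    "20.0.0.100",
    "20.0.0.105",
    "20.0.0.106",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster.local"
  ],
  ......
}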
#Copy the CA certificate and the apiserver certificate and private key into the ssl subdirectory of the kubernetes working directory
cp ca*pem apiserver*pem /opt/kubernetes/ssl/
#Upload kubernetes-server-linux-amd64.tar.gz to /opt/k8s/ and extract the kubernetes tarball
#Download: https://github.com/kubernetes/kubernetes/blob/release-1.20/CHANGELOG/CHANGELOG-1.20.md
#Note: the page lists many packages; downloading the server package alone is enough, since it contains the binaries for both the Master and the Worker Node.
cd /opt/k8s/
tar zxvf kubernetes-server-linux-amd64.tar.gz
#Copy the key master component binaries into the bin subdirectory of the kubernetes working directory
cd /opt/k8s/kubernetes/server/bin
cp kube-apiserver kubectl kube-controller-manager kube-scheduler /opt/kubernetes/bin/
ln -s /opt/kubernetes/bin/* /usr/local/bin/
#Create the bootstrap token authentication file. The apiserver reads it at startup, which effectively creates this user in the cluster so it can then be authorized with RBAC.
cd /opt/k8s/
vim token.sh
#!/bin/bash
#Take the first 16 bytes of random data, print them in hexadecimal, and strip the spaces
BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
#Generate token.csv in the format: token,user name,UID,user group
cat > /opt/kubernetes/cfg/token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
chmod +x token.sh
./token.sh
cat /opt/kubernetes/cfg/token.csv
#With the binaries, token, and certificates in place, start the apiserver service
cd /opt/k8s/
./apiserver.sh 20.0.0.101 https://20.0.0.101:2379,https://20.0.0.103:2379,https://20.0.0.104:2379
#Check that the process started successfully
ps aux | grep kube-apiserver
netstat -natp | grep 6443 #The secure port 6443 accepts HTTPS requests and performs authentication based on the token file, client certificates, etc.
#Start the scheduler service
cd /opt/k8s/
./scheduler.sh
ps aux | grep kube-scheduler
#Start the controller-manager service
./controller-manager.sh
ps aux | grep kube-controller-manager
#Generate the kubeconfig file kubectl uses to connect to the cluster
./admin.sh
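#admin.sh is not shown above. A minimal sketch of what such a script typically does, assuming it uses the admin certificate generated by k8s-cert.sh:
KUBE_APISERVER="https://20.0.0.101:6443"
mkdir -p /root/.kube
kubectl config set-cluster kubernetes --certificate-authority=/opt/k8s/k8s-cert/ca.pem --embed-certs=true --server=${KUBE_APISERVER} --kubeconfig=/root/.kube/config
kubectl config set-credentials admin --client-certificate=/opt/k8s/k8s-cert/admin.pem --client-key=/opt/k8s/k8s-cert/admin-key.pem --embed-certs=true --kubeconfig=/root/.kube/config
kubectl config set-context default --cluster=kubernetes --user=admin --kubeconfig=/root/.kube/config
kubectl config use-context default --kubeconfig=/root/.kube/config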
#Check the current cluster component status with kubectl
kubectl get cs
NAME STATUS MESSAGE ERROR
controller-manager Healthy ok
scheduler Healthy ok
etcd-2 Healthy {"health":"true"}
etcd-1 Healthy {"health":"true"}
etcd-0 Healthy {"health":"true"}
#Check version information
kubectl version
------------------------------ Deploy the Worker Node components ------------------------------
//On all worker nodes
#Create the kubernetes working directory
mkdir -p /opt/kubernetes/{bin,cfg,ssl,logs}
#Upload node.zip to /opt and unzip it to obtain kubelet.sh and proxy.sh
cd /opt/
unzip node.zip
chmod +x kubelet.sh proxy.sh
//On the master01 node
#Copy kubelet and kube-proxy to the worker nodes
cd /opt/k8s/kubernetes/server/bin
scp kubelet kube-proxy root@20.0.0.103:/opt/kubernetes/bin/
scp kubelet kube-proxy root@20.0.0.104:/opt/kubernetes/bin/
#Upload kubeconfig.sh to /opt/k8s/kubeconfig to generate the bootstrap kubeconfig file that kubelet uses when first joining the cluster, plus the kube-proxy.kubeconfig file
mkdir /opt/k8s/kubeconfig
cd /opt/k8s/kubeconfig
chmod +x kubeconfig.sh
./kubeconfig.sh 20.0.0.101 /opt/k8s/k8s-cert/
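#kubeconfig.sh is assumed to build both files with kubectl config; a sketch of the bootstrap.kubeconfig half (the token must be the one generated into /opt/kubernetes/cfg/token.csv earlier):
BOOTSTRAP_TOKEN=$(awk -F, '{print $1}' /opt/kubernetes/cfg/token.csv)
KUBE_APISERVER="https://20.0.0.101:6443"
kubectl config set-cluster kubernetes --certificate-authority=/opt/k8s/k8s-cert/ca.pem --embed-certs=true --server=${KUBE_APISERVER} --kubeconfig=bootstrap.kubeconfig
kubectl config set-credentials kubelet-bootstrap --token=${BOOTSTRAP_TOKEN} --kubeconfig=bootstrap.kubeconfig
kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=bootstrap.kubeconfig
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig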
#Copy bootstrap.kubeconfig and kube-proxy.kubeconfig to the worker nodes
scp bootstrap.kubeconfig kube-proxy.kubeconfig root@20.0.0.103:/opt/kubernetes/cfg/
scp bootstrap.kubeconfig kube-proxy.kubeconfig root@20.0.0.104:/opt/kubernetes/cfg/
#RBAC authorization so that the kubelet-bootstrap user is allowed to submit CSRs for certificates
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
If this fails, first bind the default cluster-admin cluster role to grant admin rights over the whole cluster:
kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=system:anonymous
//On the node01 node
#Start the kubelet service
cd /opt/
./kubelet.sh 20.0.0.103
ps aux | grep kubelet
//On the master01 node, approve the CSR request
#The CSR submitted by node01's kubelet appears; Pending means it is waiting for the cluster to issue a certificate for the node
kubectl get csr
NAME AGE SIGNERNAME REQUESTOR CONDITION
node-csr-duiobEzQ0R93HsULoS9NT9JaQylMmid_nBF3Ei3NtFE 12s kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Pending
#Approve the CSR request
kubectl certificate approve node-csr-duiobEzQ0R93HsULoS9NT9JaQylMmid_nBF3Ei3NtFE
#Approved,Issued means the CSR has been approved and the certificate issued
kubectl get csr
NAME AGE SIGNERNAME REQUESTOR CONDITION
node-csr-duiobEzQ0R93HsULoS9NT9JaQylMmid_nBF3Ei3NtFE 2m5s kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Approved,Issued
#Check the nodes; because the network plugin has not been deployed yet, the node is NotReady
kubectl get node
NAME STATUS ROLES AGE VERSION
20.0.0.103 NotReady <none> 108s v1.20.11
//On the node01 node
#Load the ip_vs modules
for i in $(ls /usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs|grep -o "^[^.]*");do echo $i; /sbin/modinfo -F filename $i >/dev/null 2>&1 && /sbin/modprobe $i;done
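#The loop above only loads the modules for the current boot. An optional addition, assuming systemd-modules-load is available, makes them persistent and verifies the result:
cat > /etc/modules-load.d/ipvs.conf << EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF
lsmod | grep ip_vs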
#Start the kube-proxy service
cd /opt/
./proxy.sh 20.0.0.103
ps aux | grep kube-proxy
Repeat the same operations on node02.
Deploy the CNI network component
---------- Deploy flannel ----------
//On the node01 node
#Upload cni-plugins-linux-amd64-v0.8.6.tgz and flannel.tar to /opt
cd /opt/
docker load -i flannel.tar
mkdir -p /opt/cni/bin
tar zxvf cni-plugins-linux-amd64-v0.8.6.tgz -C /opt/cni/bin
//On the master01 node
#Upload kube-flannel.yml to /opt/k8s and deploy the CNI network
cd /opt/k8s
kubectl apply -f kube-flannel.yml
kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
kube-flannel-ds-hjtc7 1/1 Running 0 7s
kubectl get nodes
NAME STATUS ROLES AGE VERSION
20.0.0.103    Ready    <none>   81m   v1.20.11
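#kube-flannel.yml is not reproduced here. The piece worth checking (assumed to follow the standard flannel manifest) is the net-conf.json entry in its ConfigMap; its Network must equal the cluster-cidr used by kube-controller-manager and kube-proxy:
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }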
---------- Deploy Calico ----------
//On the master01 node
#Upload calico.yaml to /opt/k8s and deploy the CNI network
cd /opt/k8s
vim calico.yaml
#Modify the Pod network defined inside it (CALICO_IPV4POOL_CIDR); it must match the cluster-cidr specified earlier in the kube-controller-manager configuration file
            - name: CALICO_IPV4POOL_CIDR
              value: "10.244.0.0/16"    #Calico's default pool is 192.168.0.0/16
kubectl apply -f calico.yaml
kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-659bd7879c-4h8vk 1/1 Running 0 58s
calico-node-nsm6b 1/1 Running 0 58s
calico-node-tdt8v 1/1 Running 0 58s
#Once the Calico Pods are all Running, the nodes become Ready as well
kubectl get nodes
---------- Deploy the node02 node ----------
//On the node01 node
cd /opt/
scp kubelet.sh proxy.sh root@20.0.0.103:/opt/
scp kubelet.sh proxy.sh root@20.0.0.104:/opt/
scp -r /opt/cni root@20.0.0.103:/opt/
scp -r /opt/cni root@20.0.0.104:/opt/
//On the node02 node
#Start the kubelet service
cd /opt/
chmod +x kubelet.sh
./kubelet.sh 20.0.0.104
//On the master01 node
kubectl get csr
NAME AGE SIGNERNAME REQUESTOR CONDITION
node-csr-BbqEh6LvhD4R6YdDUeEPthkb6T_CJDcpVsmdvnh81y0 10s kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Pending
node-csr-duiobEzQ0R93HsULoS9NT9JaQylMmid_nBF3Ei3NtFE 85m kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Approved,Issued
#Approve the CSR request
kubectl certificate approve node-csr-BbqEh6LvhD4R6YdDUeEPthkb6T_CJDcpVsmdvnh81y0
kubectl get csr
NAME AGE SIGNERNAME REQUESTOR CONDITION
node-csr-BbqEh6LvhD4R6YdDUeEPthkb6T_CJDcpVsmdvnh81y0 23s kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Approved,Issued
node-csr-duiobEzQ0R93HsULoS9NT9JaQylMmid_nBF3Ei3NtFE 85m kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Approved,Issued
#Load the ipvs modules
for i in $(ls /usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs|grep -o "^[^.]*");do echo $i; /sbin/modinfo -F filename $i >/dev/null 2>&1 && /sbin/modprobe $i;done
#Start the kube-proxy service with the proxy.sh script
cd /opt/
chmod +x proxy.sh
./proxy.sh 20.0.0.104
#Check the node status in the cluster
kubectl get nodes
Deploy CoreDNS
//On all worker nodes
#Upload coredns.tar to /opt
cd /opt
docker load -i coredns.tar
//On the master01 node
#Upload coredns.yaml to /opt/k8s and deploy CoreDNS
cd /opt/k8s
kubectl apply -f coredns.yaml
kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-5ffbfd976d-j6shb 1/1 Running 0 32s
#DNS resolution test
kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=system:anonymous
kubectl run -it --rm dns-test --image=busybox:1.28.4 sh
If you don't see a command prompt, try pressing enter.
/ # nslookup kubernetes
Server: 10.0.0.2
Address 1: 10.0.0.2 kube-dns.kube-system.svc.cluster.local
Name: kubernetes
Address 1: 10.0.0.1 kubernetes.default.svc.cluster.local
exit
---------- Deploy the master02 node ----------
#Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
#Disable SELinux
setenforce 0
sed -i 's/enforcing/disabled/' /etc/selinux/config
#Disable swap
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
#Set the hostname according to the plan
hostnamectl set-hostname master02
su
#Add hosts entries for both masters (add them on every host in the k8s cluster so every host has the mapping)
cat >> /etc/hosts << EOF
20.0.0.101 master01
20.0.0.102 master02
20.0.0.103 node01
20.0.0.104 node02
EOF
#Tune kernel parameters
cat > /etc/sysctl.d/k8s.conf << EOF
#Enable bridge mode so bridged traffic is passed to the iptables chains
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
#Disable the IPv6 protocol
net.ipv6.conf.all.disable_ipv6=1
net.ipv4.ip_forward=1
EOF
sysctl --system
#Time synchronization
yum install ntpdate -y
ntpdate ntp.aliyun.com
//From the master01 node, copy the certificate files and each master component's configuration and service unit files to the master02 node
scp -r /opt/etcd/ root@20.0.0.102:/opt/
scp -r /opt/kubernetes/ root@20.0.0.102:/opt
scp -r /root/.kube root@20.0.0.102:/root
scp /usr/lib/systemd/system/{kube-apiserver,kube-controller-manager,kube-scheduler}.service root@20.0.0.102:/usr/lib/systemd/system/
//Modify the IPs in the kube-apiserver configuration file
vim /opt/kubernetes/cfg/kube-apiserver
KUBE_APISERVER_OPTS="--logtostderr=true \
--v=4 \
--etcd-servers=https://20.0.0.101:2379,https://20.0.0.103:2379,https://20.0.0.104:2379 \
--bind-address=20.0.0.102 \ #modify
--secure-port=6443 \
--advertise-address=20.0.0.102 \ #modify
......
//On the master02 node, start each service and enable it at boot
systemctl start kube-apiserver.service
systemctl enable kube-apiserver.service
systemctl start kube-controller-manager.service
systemctl enable kube-controller-manager.service
systemctl start kube-scheduler.service
systemctl enable kube-scheduler.service
//Check the worker node status
ln -s /opt/kubernetes/bin/* /usr/local/bin/
kubectl get nodes
kubectl get nodes -o wide #-o wide: print extra information; for Pods, this includes the name of the Node each Pod runs on
Load balancer deployment
//Configure the load balancer cluster for active/standby high availability (nginx for load balancing, keepalived for failover)
##### On the lb01 and lb02 nodes #####
//Configure the official nginx yum repository
cat > /etc/yum.repos.d/nginx.repo << 'EOF'
[nginx]
name=nginx repo
baseurl=http://nginx.org/packages/centos/7/$basearch/
gpgcheck=0
EOF
yum install nginx -y
//Modify the nginx configuration file: add a layer-4 reverse-proxy load balancer pointing at the two k8s master node IPs on port 6443
vim /etc/nginx/nginx.conf
events {
worker_connections 1024;
}
#Add the following block
stream {
    log_format main '$remote_addr $upstream_addr - [$time_local] $status';
    access_log /var/log/nginx/k8s-access.log main;
    upstream k8s-apiserver {
        server 20.0.0.101:6443;
        server 20.0.0.102:6443;
    }
    server {
        listen 6443;
        proxy_pass k8s-apiserver;
    }
}
http {
......
//Check the configuration file syntax
nginx -t
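#If nginx -t reports an "unknown directive stream" error, the nginx build lacks the stream module; the nginx.org package used here is expected to include it, which can be confirmed with:
nginx -V 2>&1 | grep -o with-stream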
//Start the nginx service and confirm it is listening on port 6443
systemctl start nginx
systemctl enable nginx
netstat -natp | grep nginx
//Deploy the keepalived service
yum install keepalived -y
//Modify the keepalived configuration file
vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
   # Notification email recipients
   notification_email {
     acassen@firewall.loc
     failover@firewall.loc
     sysadmin@firewall.loc
   }
   # Notification email sender
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id NGINX_MASTER
   #NGINX_MASTER on the lb01 node, NGINX_BACKUP on the lb02 node
vrrp_strict
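   #Note: vrrp_strict is commonly commented out or removed here; with it enabled the VIP is often unreachable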
}
#Add a script to be executed periodically
vrrp_script check_nginx {
script "/etc/nginx/check_nginx.sh" #指定检查nginx存活的脚本路径
}
vrrp_instance VI_1 {
    state MASTER            #MASTER on the lb01 node, BACKUP on the lb02 node
    interface ens33         #network interface name, ens33
    virtual_router_id 51    #VRID, must be identical on both nodes
    priority 100            #100 on the lb01 node, 90 on the lb02 node
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
        20.0.0.100/24       #the VIP
}
track_script {
        check_nginx         #the script defined in vrrp_script
}
}
//Create the nginx health-check script
vim /etc/nginx/check_nginx.sh
#!/bin/bash
#If nginx does not respond, stop keepalived so the VIP fails over to the backup node
/usr/bin/curl -I http://localhost &>/dev/null
if [ $? -ne 0 ];then
# /etc/init.d/keepalived stop
systemctl stop keepalived
fi
chmod +x /etc/nginx/check_nginx.sh
//Start the keepalived service (nginx must be started first, then keepalived)
systemctl start keepalived
systemctl enable keepalived
ip addr #check whether the VIP has been assigned
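#Optional check that the VIP actually fronts the apiservers; any HTTPS response from the VIP (even a 401/403 JSON body) shows nginx is forwarding to port 6443:
ip addr show ens33 | grep 20.0.0.100
curl -k https://20.0.0.100:6443/version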
//On the worker nodes, change the server address in bootstrap.kubeconfig, kubelet.kubeconfig, and kube-proxy.kubeconfig to the VIP
cd /opt/kubernetes/cfg/
vim bootstrap.kubeconfig
server: https://20.0.0.100:6443
vim kubelet.kubeconfig
server: https://20.0.0.100:6443
vim kube-proxy.kubeconfig
server: https://20.0.0.100:6443
//Restart the kubelet and kube-proxy services
systemctl restart kubelet.service
systemctl restart kube-proxy.service
//On lb01, check the connections between nginx and the node and master nodes
netstat -natp | grep nginx
##### On the master01 node #####
//Test creating a Pod
kubectl run nginx --image=nginx
//Check the Pod status
kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-dbddb74b8-nf9sk 0/1 ContainerCreating 0 33s #being created
kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-dbddb74b8-nf9sk 1/1 Running 0 80s #created and running
kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE
nginx-dbddb74b8-26r9l 1/1 Running 0 10m 172.17.36.2 192.168.80.15 <none>
//READY 1/1 means this Pod contains 1 container
//On a worker node in the corresponding network segment, access it directly with a browser or curl
curl 172.17.36.2
//On master01, exec into the container: kubectl exec -it nginx bash
//Then view the nginx logs on the master01 node
kubectl logs nginx-dbddb74b8-nf9sk
Deploy the Dashboard
//On the master01 node
#Upload recommended.yaml to /opt/k8s
cd /opt/k8s
vim recommended.yaml
#By default the Dashboard is only reachable from inside the cluster; change the Service to NodePort type to expose it externally:
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30001     #add
  type: NodePort          #add
  selector:
    k8s-app: kubernetes-dashboard
kubectl apply -f recommended.yaml
#Create a service account and bind it to the default cluster-admin cluster role
kubectl create serviceaccount dashboard-admin -n kube-system
kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
#Get the token
kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}')
#Log in to the Dashboard with the printed token
https://20.0.0.103:30001