Building a highly available Kubernetes (k8s) cluster
Environment: CentOS 7
Load balancers:
10.1.1.25 nginx + keepalived
10.1.1.26 nginx + keepalived
10.1.1.34 VIP (keepalived virtual IP)
Masters:
10.1.1.28 etcd1 + kube-scheduler + kube-controller-manager + kube-apiserver
10.1.1.29 etcd2 + kube-scheduler + kube-controller-manager + kube-apiserver
10.1.1.30 etcd3 + kube-scheduler + kube-controller-manager + kube-apiserver
Nodes:
10.1.1.31 kubelet + kube-proxy + docker
10.1.1.32 kubelet + kube-proxy + docker
10.1.1.33 kubelet + kube-proxy + docker
1. Initialization (all machines)
[root@Dping-test-5 ~]# rm -rf /etc/yum.repos.d/*
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.cloud.tencent.com/repo/centos7_base.repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.cloud.tencent.com/repo/epel-7.repo
yum clean all
[root@Dping-test-5 ~]# yum repolist
systemctl stop firewalld # stop firewalld
systemctl disable firewalld # disable firewalld at boot
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
Disable swap: delete the swap partition line (the last line) from /etc/fstab
[root@Dping-test-5 ~]# vim /etc/fstab
ntpdate time.windows.com
[root@master2-29 ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
10.1.1.28 k8s-master1
10.1.1.29 k8s-master2
10.1.1.30 k8s-master3
10.1.1.31 k8s-node-1
10.1.1.32 k8s-node-2
10.1.1.33 k8s-node-3
Set each host's name to match /etc/hosts, e.g.:
[root@master2-29 ~]# hostnamectl set-hostname k8s-node1-31
[root@master2-29 ~]# bash
Raise the open-file limits by appending to the end of the file:
vim /etc/security/limits.conf
root soft nofile 65535
root hard nofile 65535
* soft nofile 65535
* hard nofile 65535
Raise the kernel memory-map limit needed by Java workloads (e.g. Elasticsearch):
sed -i '$a vm.max_map_count=262144' /etc/sysctl.conf && sysctl -p
Reboot
reboot
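After the reboot, a quick sanity check confirms the settings stuck (expected values assume the steps above succeeded):
getenforce               # should print Disabled
free -m | grep -i swap   # swap total should be 0
ulimit -n                # should print 65535
sysctl vm.max_map_count  # should print 262144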
2. Generate the certificates
tar -xf TLS.tar.gz && cd TLS && ./cfssl.sh
cd etcd
[root@Dping-test-5 etcd]# cat server-csr.json
{
  "CN": "etcd",
  "hosts": [
    "10.1.1.28",
    "10.1.1.29",
    "10.1.1.30"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing"
    }
  ]
}
Change the IPs to your own, then run the script:
[root@Dping-test-5 etcd]# ./generate_etcd_cert.sh
The script generates the certificates:
[root@Dping-test-5 etcd]# ls
ca-config.json ca.csr ca-csr.json ca-key.pem ca.pem generate_etcd_cert.sh server.csr server-csr.json server-key.pem server.pem
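Before distributing the certificates it is worth confirming that the server certificate's SANs really contain the three etcd IPs (a plain openssl check):
[root@Dping-test-5 etcd]# openssl x509 -in server.pem -noout -text | grep -A1 "Subject Alternative Name"
# expected to list: IP Address:10.1.1.28, IP Address:10.1.1.29, IP Address:10.1.1.30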
Install the etcd cluster on masters 1, 2, and 3
tar -xf etcd.tar.gz
Replace the bundled certificates with the freshly generated ones:
[root@Dping-test-5 etcd]# scp {ca,server,server-key}.pem 10.1.1.28:/root/etcd/ssl
Edit the configuration file:
[root@master1-28 etcd]# cat cfg/etcd.conf
#[Member]
ETCD_NAME="etcd-1"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://10.1.1.28:2380"
ETCD_LISTEN_CLIENT_URLS="https://10.1.1.28:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.1.1.28:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://10.1.1.28:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://10.1.1.28:2380,etcd-2=https://10.1.1.29:2380,etcd-3=https://10.1.1.30:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
From this master, push the etcd directory out to /opt/ on all three masters (including itself):
[root@master1-28 ~]# scp -r etcd root@10.1.1.28:/opt/
[root@master1-28 ~]# scp -r etcd root@10.1.1.29:/opt/
[root@master1-28 ~]# scp -r etcd root@10.1.1.30:/opt/
Place the unit file under systemd on each master:
[root@master1-28 ~]# scp etcd.service root@10.1.1.28:/usr/lib/systemd/system
root@10.1.1.28's password:
etcd.service 100% 1078 991.6KB/s 00:00
[root@master1-28 ~]# scp etcd.service root@10.1.1.29:/usr/lib/systemd/system
root@10.1.1.29's password:
etcd.service 100% 1078 784.2KB/s 00:00
[root@master1-28 ~]# scp etcd.service root@10.1.1.30:/usr/lib/systemd/system
root@10.1.1.30's password:
etcd.service
Edit the configuration on the other two masters:
[root@master2-29 ~]# cat /opt/etcd/cfg/etcd.conf
#[Member]
ETCD_NAME="etcd-2"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://10.1.1.29:2380"
ETCD_LISTEN_CLIENT_URLS="https://10.1.1.29:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.1.1.29:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://10.1.1.29:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://10.1.1.28:2380,etcd-2=https://10.1.1.29:2380,etcd-3=https://10.1.1.30:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
[root@master3-30 ssl]# cat /opt/etcd/cfg/etcd.conf
#[Member]
ETCD_NAME="etcd-3"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://10.1.1.30:2380"
ETCD_LISTEN_CLIENT_URLS="https://10.1.1.30:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.1.1.30:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://10.1.1.30:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://10.1.1.28:2380,etcd-2=https://10.1.1.29:2380,etcd-3=https://10.1.1.30:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
Reload systemd and start the cluster on all 3 members:
[root@master1-28 ~]# systemctl daemon-reload
systemctl restart etcd && systemctl enable etcd
Check whether the cluster came up healthy:
[root@master1-28 ~]# /opt/etcd/bin/etcdctl \
> --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem \
> --endpoints="https://10.1.1.28:2379,https://10.1.1.29:2379,https://10.1.1.30:2379" \
> cluster-health
member 416db8be23591a75 is healthy: got healthy result from https://10.1.1.29:2379
member 644a71c6c00b5d4a is healthy: got healthy result from https://10.1.1.30:2379
member 8ee665e828ef4364 is healthy: got healthy result from https://10.1.1.28:2379
cluster is healthy
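As a further smoke test, write a key through one member and read it back through another; a sketch assuming etcdctl speaks the v2 API, as the flag style above implies:
[root@master1-28 ~]# /opt/etcd/bin/etcdctl \
  --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem \
  --endpoints="https://10.1.1.28:2379" set /probe ok
[root@master1-28 ~]# /opt/etcd/bin/etcdctl \
  --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem \
  --endpoints="https://10.1.1.29:2379" get /probe   # should print: ok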
#######################################################################
Deploying the Kubernetes control-plane components
1. Edit the certificate signing request
[root@Dping-test-5 k8s]# cat server-csr.json
{
  "CN": "kubernetes",
  "hosts": [
    "10.0.0.1",
    "127.0.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local",
    "10.1.1.25",
    "10.1.1.26",
    "10.1.1.28",
    "10.1.1.29",
    "10.1.1.30",
    "10.1.1.31",
    "10.1.1.32",
    "10.1.1.34",
    "10.1.1.33"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
2. Generate the certificates
[root@Dping-test-5 k8s]# ./generate_k8s_cert.sh
[root@Dping-test-5 k8s]# ls
ca-config.json ca.csr ca-csr.json ca-key.pem ca.pem generate_k8s_cert.sh kube-proxy.csr kube-proxy-csr.json kube-proxy-key.pem kube-proxy.pem server.csr server-csr.json server-key.pem server.pem
3. Copy the certificates to 10.1.1.28
scp *.pem 10.1.1.28:/root/k8s/kubernetes/ssl/
[root@master1-28 ssl]# rm -rf kube-proxy* # the kube-proxy certs belong on the nodes; the apiserver does not use them
4. Edit the configuration file
[root@master1-28 k8s]# cat kubernetes/cfg/kube-apiserver.conf
KUBE_APISERVER_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/kubernetes/logs \
--etcd-servers=https://10.1.1.28:2379,https://10.1.1.29:2379,https://10.1.1.30:2379 \
--bind-address=10.1.1.28 \
--secure-port=6443 \
--advertise-address=10.1.1.28 \
--allow-privileged=true \
--service-cluster-ip-range=10.0.0.0/24 \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
--authorization-mode=RBAC,Node \
--enable-bootstrap-token-auth=true \
--token-auth-file=/opt/kubernetes/cfg/token.csv \
--service-node-port-range=30000-32767 \
--kubelet-client-certificate=/opt/kubernetes/ssl/server.pem \
--kubelet-client-key=/opt/kubernetes/ssl/server-key.pem \
--tls-cert-file=/opt/kubernetes/ssl/server.pem \
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \
--client-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/opt/etcd/ssl/ca.pem \
--etcd-certfile=/opt/etcd/ssl/server.pem \
--etcd-keyfile=/opt/etcd/ssl/server-key.pem \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/opt/kubernetes/logs/k8s-audit.log"
[root@master1-28 k8s]# mv kubernetes/ /opt/
[root@master1-28 k8s]# mv kube-apiserver.service kube-controller-manager.service kube-scheduler.service /usr/lib/systemd/system/
5. Start the services and enable them at boot
systemctl restart kube-apiserver
systemctl restart kube-controller-manager
systemctl restart kube-scheduler
[root@master1-28 k8s]# mv /opt/kubernetes/bin/kubectl /usr/local/bin/ # move kubectl out first: it is a client tool, not a systemd service
[root@master1-28 k8s]# for i in $(ls /opt/kubernetes/bin);do systemctl enable $i;done
6. Check. The components show <unknown>, but there is no error, so this is fine (the <unknown> in kubectl get cs is a known cosmetic issue in v1.16).
[root@master1-28 k8s]# kubectl get cs
NAME AGE
scheduler <unknown>
controller-manager <unknown>
etcd-0 <unknown>
etcd-1 <unknown>
etcd-2 <unknown>
The process list confirms all 3 components started successfully:
[root@master1-28 k8s]# ps -ef | grep kube
root 24517 1 3 12:06 ? 00:00:21 /opt/kubernetes/bin/kube-apiserver --logtostderr=false --v=2 --log-dir=/opt/kubernetes/logs --etcd-servers=https://10.1.1.28:2379,https://10.1.1.29:2379,https://10.1.1.30:2379 --bind-address=10.1.1.28 --secure-port=6443 --advertise-address=10.1.1.28 --allow-privileged=true --service-cluster-ip-range=10.0.0.0/24 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction --authorization-mode=RBAC,Node --enable-bootstrap-token-auth=true --token-auth-file=/opt/kubernetes/cfg/token.csv --service-node-port-range=30000-32767 --kubelet-client-certificate=/opt/kubernetes/ssl/server.pem --kubelet-client-key=/opt/kubernetes/ssl/server-key.pem --tls-cert-file=/opt/kubernetes/ssl/server.pem --tls-private-key-file=/opt/kubernetes/ssl/server-key.pem --client-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem --etcd-cafile=/opt/etcd/ssl/ca.pem --etcd-certfile=/opt/etcd/ssl/server.pem --etcd-keyfile=/opt/etcd/ssl/server-key.pem --audit-log-maxage=30 --audit-log-maxbackup=3 --audit-log-maxsize=100 --audit-log-path=/opt/kubernetes/logs/k8s-audit.log
root 24751 1 1 12:09 ? 00:00:05 /opt/kubernetes/bin/kube-controller-manager --logtostderr=false --v=2 --log-dir=/opt/kubernetes/logs --leader-elect=true --master=127.0.0.1:8080 --address=127.0.0.1 --allocate-node-cidrs=true --cluster-cidr=10.244.0.0/16 --service-cluster-ip-range=10.0.0.0/24 --cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem --cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem --root-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem --experimental-cluster-signing-duration=87600h0m0s
root 24777 1 0 12:09 ? 00:00:01 /opt/kubernetes/bin/kube-scheduler --logtostderr=false --v=2 --log-dir=/opt/kubernetes/logs --leader-elect --master=127.0.0.1:8080 --address=127.0.0.1
root 25185 15019 0 12:15 pts/1 00:00:00 grep --color=auto kube
7. Authorize the kubelet-bootstrap user
[root@master1-28 k8s]# cat /opt/kubernetes/cfg/token.csv
c47ffb939f5ca36231d9e3121a252940,kubelet-bootstrap,10001,"system:node-bootstrapper"
kubectl create clusterrolebinding kubelet-bootstrap \
--clusterrole=system:node-bootstrapper \
--user=kubelet-bootstrap
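The token in token.csv is just a random 32-character hex string; if you want to mint your own instead of reusing the one from the tarball, something like this works (hedged example; token.csv and the node-side bootstrap.kubeconfig must stay in sync, and kube-apiserver must be restarted after the change):
head -c 16 /dev/urandom | od -An -t x | tr -d ' '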
##########################################################################
Node setup (31, 32, 33)
1. Install docker from the binary tarball
[root@node1-31 ~]# tar -xf docker-18.09.6.tgz
[root@node1-31 ~]# mv docker/* /usr/bin/
[root@node1-31 ~]# mv docker.service /usr/lib/systemd/system/
[root@node1-31 ~]# cat /etc/docker/daemon.json
{
"registry-mirrors": ["https://a960lhr2.mirror.aliyuncs.com"],
"insecure-registries":["10.1.1.11"]
}
[root@node1-31 ~]# systemctl restart docker
[root@node1-31 ~]# systemctl enable docker
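A quick check that docker is up and picked up the mirror settings (the exact output layout varies by version; treat this as a sketch):
[root@node1-31 ~]# docker info | grep -A2 -i "registry mirrors"
[root@node1-31 ~]# docker run --rm hello-world    # optional end-to-end pull-and-run test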
2. Edit the configuration
[root@node1-31 kubernetes]# cat cfg/bootstrap.kubeconfig
apiVersion: v1
clusters:
- cluster:
    certificate-authority: /opt/kubernetes/ssl/ca.pem
    server: https://10.1.1.28:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubelet-bootstrap
  name: default
current-context: default
kind: Config
preferences: {}
users:
- name: kubelet-bootstrap
  user:
    token: c47ffb939f5ca36231d9e3121a252940
[root@node1-31 kubernetes]# cat cfg/kube-proxy.kubeconfig
apiVersion: v1
clusters:
- cluster:
    certificate-authority: /opt/kubernetes/ssl/ca.pem
    server: https://10.1.1.28:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kube-proxy
  name: default
current-context: default
kind: Config
preferences: {}
users:
- name: kube-proxy
  user:
    client-certificate: /opt/kubernetes/ssl/kube-proxy.pem
    client-key: /opt/kubernetes/ssl/kube-proxy-key.pem
[root@node1-31 ~]# mv kubernetes/ /opt/
[root@node1-31 ~]# mv kubelet.service kube-proxy.service /usr/lib/systemd/system/
Copy the certificates to the node:
[root@Dping-test-5 k8s]# scp ca.pem kube-proxy-key.pem kube-proxy.pem 10.1.1.31:/opt/kubernetes/ssl/
Start kubelet and kube-proxy:
systemctl restart kubelet && systemctl restart kube-proxy
systemctl enable kubelet && systemctl enable kube-proxy
Back on the master, approve the node's certificate request:
[root@master1-28 k8s]# kubectl get csr
NAME AGE REQUESTOR CONDITION
node-csr-Lz9GXivI0TWw2DAYVzE3WDYPFvEIYGxQqIT0Oz55ZY0 104s kubelet-bootstrap Pending
[root@master1-28 k8s]# kubectl certificate approve node-csr-Lz9GXivI0TWw2DAYVzE3WDYPFvEIYGxQqIT0Oz55ZY0
certificatesigningrequest.certificates.k8s.io/node-csr-Lz9GXivI0TWw2DAYVzE3WDYPFvEIYGxQqIT0Oz55ZY0 approved
[root@master1-28 k8s]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-node1 NotReady <none> 10s v1.16.0
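node1 has joined (NotReady is expected until the CNI plugin is deployed below). As node2 and node3 start kubelet, each creates its own Pending CSR; approve them one at a time as above, or all at once with plain kubectl:
[root@master1-28 k8s]# kubectl get csr -o name | xargs -r kubectl certificate approve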
The steps on the other two nodes are essentially the same; only the hostname differs.
On 32:
[root@k8s-node2-32 ~]# cat /opt/kubernetes/cfg/kubelet.conf
KUBELET_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/kubernetes/logs \
--hostname-override=k8s-node2 \
--network-plugin=cni \
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \
--config=/opt/kubernetes/cfg/kubelet-config.yml \
--cert-dir=/opt/kubernetes/ssl \
--pod-infra-container-image=lizhenliang/pause-amd64:3.0"
[root@k8s-node2-32 ~]# cat /opt/kubernetes/cfg/kube-proxy-config.yml
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
address: 0.0.0.0
metricsBindAddress: 0.0.0.0:10249
clientConnection:
  kubeconfig: /opt/kubernetes/cfg/kube-proxy.kubeconfig
hostnameOverride: k8s-node2
clusterCIDR: 10.0.0.0/24
mode: ipvs
ipvs:
  scheduler: "rr"
iptables:
  masqueradeAll: true
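Because kube-proxy runs with mode: ipvs, the IPVS kernel modules must be available on every node; a hedged helper (module names vary slightly between kernels; nf_conntrack_ipv4 matches the CentOS 7 3.10 kernel):
yum -y install ipvsadm ipset
for m in ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack_ipv4; do modprobe $m; done
lsmod | grep ip_vs    # confirm the modules loaded
ipvsadm -Ln           # once kube-proxy is running, the rr virtual servers show up here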
[root@k8s-node2-32 kubernetes]# cat cfg/bootstrap.kubeconfig
apiVersion: v1
clusters:
- cluster:
    certificate-authority: /opt/kubernetes/ssl/ca.pem
    server: https://10.1.1.28:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubelet-bootstrap
  name: default
current-context: default
kind: Config
preferences: {}
users:
- name: kubelet-bootstrap
  user:
    token: c47ffb939f5ca36231d9e3121a252940
[root@k8s-node2-32 kubernetes]# cat cfg/kube-proxy.kubeconfig
apiVersion: v1
clusters:
- cluster:
    certificate-authority: /opt/kubernetes/ssl/ca.pem
    server: https://10.1.1.28:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kube-proxy
  name: default
current-context: default
kind: Config
preferences: {}
users:
- name: kube-proxy
  user:
    client-certificate: /opt/kubernetes/ssl/kube-proxy.pem
    client-key: /opt/kubernetes/ssl/kube-proxy-key.pem
On 33:
[root@k8s-node3-33 ~]# cat /opt/kubernetes/cfg/kubelet.conf
KUBELET_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/kubernetes/logs \
--hostname-override=k8s-node3 \
--network-plugin=cni \
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \
--config=/opt/kubernetes/cfg/kubelet-config.yml \
--cert-dir=/opt/kubernetes/ssl \
--pod-infra-container-image=lizhenliang/pause-amd64:3.0"
[root@k8s-node3-33 ~]# cat /opt/kubernetes/cfg/kube-proxy-config.yml
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
address: 0.0.0.0
metricsBindAddress: 0.0.0.0:10249
clientConnection:
  kubeconfig: /opt/kubernetes/cfg/kube-proxy.kubeconfig
hostnameOverride: k8s-node3
clusterCIDR: 10.0.0.0/24
mode: ipvs
ipvs:
  scheduler: "rr"
iptables:
  masqueradeAll: true
[root@k8s-node-3-33 kubernetes]# cat cfg/kube-proxy.kubeconfig
apiVersion: v1
clusters:
- cluster:
    certificate-authority: /opt/kubernetes/ssl/ca.pem
    server: https://10.1.1.28:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kube-proxy
  name: default
current-context: default
kind: Config
preferences: {}
users:
- name: kube-proxy
  user:
    client-certificate: /opt/kubernetes/ssl/kube-proxy.pem
    client-key: /opt/kubernetes/ssl/kube-proxy-key.pem
[root@k8s-node-3-33 kubernetes]# cat cfg/bootstrap.kubeconfig
apiVersion: v1
clusters:
- cluster:
    certificate-authority: /opt/kubernetes/ssl/ca.pem
    server: https://10.1.1.28:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubelet-bootstrap
  name: default
current-context: default
kind: Config
preferences: {}
users:
- name: kubelet-bootstrap
  user:
    token: c47ffb939f5ca36231d9e3121a252940
[root@k8s-master-28 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-node1 NotReady <none> 5m16s v1.16.0
k8s-node2 NotReady <none> 5m5s v1.16.0
k8s-node3 NotReady <none> 4m54s v1.16.0
####################################################################################
Deploy the CNI plugin binaries on the nodes (shown on node1; repeat on each node)
[root@k8s-node1-31 ~]# mkdir /opt/cni/bin /etc/cni/net.d -p
tar -xf cni-plugins-linux-amd64-v0.8.2.tgz -C /opt/cni/bin/
On the master, apply flannel:
[root@k8s-master-28 ~]# kubectl apply -f kube-flannel.yaml
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds-amd64 created
Grant RBAC so the apiserver can talk to the kubelets:
[root@k8s-master-28 ~]# kubectl apply -f apiserver-to-kubelet-rbac.yaml
clusterrole.rbac.authorization.k8s.io/system:kube-apiserver-to-kubelet created
clusterrolebinding.rbac.authorization.k8s.io/system:kube-apiserver created
Install coredns
[root@k8s-master-28 zujian]# kubectl apply -f coredns.yaml
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
configmap/coredns created
deployment.apps/coredns created
service/kube-dns created
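With coredns up, an in-cluster DNS lookup makes a good sanity check; a sketch using busybox:1.28 (newer busybox images have a known nslookup quirk, and the expected answer assumes the 10.0.0.0/24 service range configured above):
[root@k8s-master-28 zujian]# kubectl run -it --rm --restart=Never dns-test --image=busybox:1.28 -- nslookup kubernetes
# expected: the kube-dns service answers and resolves kubernetes to 10.0.0.1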
Try launching a pod
[root@k8s-master-28 ~]# kubectl create deployment web --image=nginx
[root@k8s-master-28 ~]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
web-d86c95cc9-qfjl2 1/1 Running 0 40s 10.244.1.2 k8s-node2 <none> <none>
On node2 we can see that the pod brought up the cni0 bridge, which communicates over flannel:
[root@k8s-node2-32 ~]# ifconfig
cni0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1450
inet 10.244.1.1 netmask 255.255.255.0 broadcast 10.244.1.255
inet6 fe80::1c56:56ff:feac:2ec prefixlen 64 scopeid 0x20<link>
ether 1e:56:56:ac:02:ec txqueuelen 1000 (Ethernet)
RX packets 1 bytes 28 (28.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 8 bytes 656 (656.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
Expose a service to verify the cluster end to end
[root@k8s-master-28 ~]# kubectl expose deployment web --port=80 --type=NodePort
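The NodePort that got assigned can be looked up before testing (the 31213 used below is whatever the cluster picked from the 30000-32767 range; output shown is illustrative):
[root@k8s-master-28 ~]# kubectl get svc web
# NAME   TYPE       CLUSTER-IP   EXTERNAL-IP   PORT(S)        AGE
# web    NodePort   10.0.0.x     <none>        80:31213/TCP   1m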
The cluster is working; the service is reachable via any node IP plus the NodePort:
[root@k8s-master-28 ~]# curl 10.1.1.32:31213
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
body {
width: 35em;
margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif;
}
</style>
##############################################################################
Now for multi-master high availability
scp -r /opt/kubernetes 10.1.1.29:/opt/
scp -r /opt/kubernetes 10.1.1.30:/opt/
This step can be skipped if the etcd certificates were already distributed when the etcd cluster was built earlier:
scp -r /opt/etcd/ssl 10.1.1.29:/opt/etcd
scp -r /opt/etcd/ssl 10.1.1.30:/opt/etcd
scp /usr/lib/systemd/system/{kube-apiserver,kube-controller-manager,kube-scheduler}.service 10.1.1.29:/usr/lib/systemd/system/
scp /usr/lib/systemd/system/{kube-apiserver,kube-controller-manager,kube-scheduler}.service 10.1.1.30:/usr/lib/systemd/system/
[root@k8s-master-28 ~]# scp /usr/local/bin/kubectl 10.1.1.29:/usr/local/bin/kubectl
[root@k8s-master-28 ~]# scp /usr/local/bin/kubectl 10.1.1.30:/usr/local/bin/kubectl
On 29:
[root@k8s-master-29 ~]# cat /opt/kubernetes/cfg/kube-apiserver.conf
KUBE_APISERVER_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/kubernetes/logs \
--etcd-servers=https://10.1.1.28:2379,https://10.1.1.29:2379,https://10.1.1.30:2379 \
--bind-address=10.1.1.29 \
--secure-port=6443 \
--advertise-address=10.1.1.29 \
--allow-privileged=true \
--service-cluster-ip-range=10.0.0.0/24 \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
--authorization-mode=RBAC,Node \
--enable-bootstrap-token-auth=true \
--token-auth-file=/opt/kubernetes/cfg/token.csv \
--service-node-port-range=30000-32767 \
--kubelet-client-certificate=/opt/kubernetes/ssl/server.pem \
--kubelet-client-key=/opt/kubernetes/ssl/server-key.pem \
--tls-cert-file=/opt/kubernetes/ssl/server.pem \
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \
--client-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/opt/etcd/ssl/ca.pem \
--etcd-certfile=/opt/etcd/ssl/server.pem \
--etcd-keyfile=/opt/etcd/ssl/server-key.pem \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/opt/kubernetes/logs/k8s-audit.log"
[root@k8s-master-29 ~]# for i in $(ls /opt/kubernetes/bin/);do systemctl restart $i;systemctl enable $i;done
On 30:
[root@k8s-master-30 ~]# cat /opt/kubernetes/cfg/kube-apiserver.conf
KUBE_APISERVER_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/kubernetes/logs \
--etcd-servers=https://10.1.1.28:2379,https://10.1.1.29:2379,https://10.1.1.30:2379 \
--bind-address=10.1.1.30 \
--secure-port=6443 \
--advertise-address=10.1.1.30 \
--allow-privileged=true \
--service-cluster-ip-range=10.0.0.0/24 \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
--authorization-mode=RBAC,Node \
--enable-bootstrap-token-auth=true \
--token-auth-file=/opt/kubernetes/cfg/token.csv \
--service-node-port-range=30000-32767 \
--kubelet-client-certificate=/opt/kubernetes/ssl/server.pem \
--kubelet-client-key=/opt/kubernetes/ssl/server-key.pem \
--tls-cert-file=/opt/kubernetes/ssl/server.pem \
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \
--client-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/opt/etcd/ssl/ca.pem \
--etcd-certfile=/opt/etcd/ssl/server.pem \
--etcd-keyfile=/opt/etcd/ssl/server-key.pem \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/opt/kubernetes/logs/k8s-audit.log"
[root@k8s-master-30 ~]# for i in $(ls /opt/kubernetes/bin/);do systemctl restart $i;systemctl enable $i;done
Query resources from the other two masters; the multi-master setup is complete:
[root@k8s-master-30 ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
busybox 1/1 Running 0 32m
web-d86c95cc9-qfjl2 1/1 Running 0 57m
#############################################################################
Installing nginx on both load balancers (25 and 26)
[root@lb1-25 ~]# yum -y install nginx
Edit /etc/nginx/nginx.conf; the stream block below load-balances TCP 6443 across the three apiservers:
# For more information on configuration, see:
# * Official English Documentation: http://nginx.org/en/docs/
# * Official Russian Documentation: http://nginx.org/ru/docs/
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
# Load dynamic modules. See /usr/share/doc/nginx/README.dynamic.
include /usr/share/nginx/modules/*.conf;
events {
    worker_connections 65535;
}
stream {
    log_format main '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
    access_log /var/log/nginx/k8s-access.log main;
    upstream k8s-apiserver {
        server 10.1.1.28:6443;
        server 10.1.1.29:6443;
        server 10.1.1.30:6443;
    }
    server {
        listen 6443;
        proxy_pass k8s-apiserver;
    }
}
http {
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
include /etc/nginx/mime.types;
default_type application/octet-stream;
# Load modular configuration files from the /etc/nginx/conf.d directory.
# See http://nginx.org/en/docs/ngx_core_module.html#include
# for more information.
include /etc/nginx/conf.d/*.conf;
# server {
# listen 80 default_server;
# listen [::]:80 default_server;
# server_name _;
# root /usr/share/nginx/html;
# Load configuration files for the default server block.
include /etc/nginx/default.d/*.conf;
# location / {
# }
# error_page 404 /404.html;
# location = /40x.html {
# }
# error_page 500 502 503 504 /50x.html;
# location = /50x.html {
# }
# }
# Settings for a TLS enabled server.
#
# server {
# listen 443 ssl http2 default_server;
# listen [::]:443 ssl http2 default_server;
# server_name _;
# root /usr/share/nginx/html;
#
# ssl_certificate "/etc/pki/nginx/server.crt";
# ssl_certificate_key "/etc/pki/nginx/private/server.key";
# ssl_session_cache shared:SSL:1m;
# ssl_session_timeout 10m;
# ssl_ciphers HIGH:!aNULL:!MD5;
# ssl_prefer_server_ciphers on;
#
# # Load configuration files for the default server block.
# include /etc/nginx/default.d/*.conf;
#
# location / {
# }
#
# error_page 404 /404.html;
# location = /40x.html {
# }
#
# error_page 500 502 503 504 /50x.html;
# location = /50x.html {
# }
# }
}
systemctl enable nginx
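Before trusting the load balancer, validate the config and confirm the listener (plain nginx/ss commands; on the epel package the stream block relies on the dynamic stream module, shipped separately as nginx-mod-stream and picked up by the modules include at the top of the file):
[root@lb1-25 ~]# nginx -t                 # syntax check; install nginx-mod-stream first if the stream block is rejected
[root@lb1-25 ~]# systemctl restart nginx
[root@lb1-25 ~]# ss -lntp | grep 6443     # nginx should now be listening for apiserver traffic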
########################################
Installing keepalived on both load balancers (25 and 26)
yum -y install keepalived
On 25 (MASTER):
[root@lb1-25 ~]# cat /etc/keepalived/check_nginx.sh
#!/bin/bash
count=$(ps -ef |grep nginx |egrep -cv "grep|$$")
if [ "$count" -eq 0 ];then
exit 1
else
exit 0
fi
[root@lb1-25 ~]# cat /etc/keepalived/keepalived.conf
global_defs {
    notification_email {
        acassen@firewall.loc
        failover@firewall.loc
        sysadmin@firewall.loc
    }
    notification_email_from Alexandre.Cassen@firewall.loc
    smtp_server 127.0.0.1
    smtp_connect_timeout 30
    router_id NGINX_MASTER
}
vrrp_script check_nginx {
    script "/etc/keepalived/check_nginx.sh"
}
vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 51    # VRRP router ID; must be unique per instance
    priority 100            # priority; the backup is set to 90
    advert_int 1            # VRRP advertisement interval, 1 second (the default)
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        10.1.1.34/24
    }
    track_script {
        check_nginx
    }
}
chmod +x /etc/keepalived/check_nginx.sh
On 26 (BACKUP):
[root@lb2-26 ~]# cat /etc/keepalived/keepalived.conf
global_defs {
    notification_email {
        acassen@firewall.loc
        failover@firewall.loc
        sysadmin@firewall.loc
    }
    notification_email_from Alexandre.Cassen@firewall.loc
    smtp_server 127.0.0.1
    smtp_connect_timeout 30
    router_id NGINX_BACKUP
}
vrrp_script check_nginx {
    script "/etc/keepalived/check_nginx.sh"
}
vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 51    # VRRP router ID; must be unique per instance
    priority 90             # priority; lower than the master's 100
    advert_int 1            # VRRP advertisement interval, 1 second (the default)
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        10.1.1.34/24
    }
    track_script {
        check_nginx
    }
}
[root@lb2-26 ~]# cat /etc/keepalived/check_nginx.sh
#!/bin/bash
count=$(ps -ef |grep nginx |egrep -cv "grep|$$")
if [ "$count" -eq 0 ];then
exit 1
else
exit 0
fi
chmod +x /etc/keepalived/check_nginx.sh
systemctl restart keepalived && systemctl enable keepalived
Check that the VIP landed on the MASTER:
[root@lb1-25 ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 52:54:00:fa:a7:c5 brd ff:ff:ff:ff:ff:ff
inet 10.1.1.25/24 brd 10.1.1.255 scope global noprefixroute eth0
valid_lft forever preferred_lft forever
inet 10.1.1.34/24 scope global secondary eth0
valid_lft forever preferred_lft forever
inet6 fe80::5db9:bb1d:8e6f:65ad/64 scope link noprefixroute
valid_lft forever preferred_lft forever
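A short failover drill confirms keepalived behaves as intended (a sketch: stop nginx on lb1 so check_nginx.sh exits 1, then watch the VIP move):
[root@lb1-25 ~]# systemctl stop nginx
[root@lb2-26 ~]# ip addr show eth0 | grep 10.1.1.34   # the VIP should now sit on lb2
[root@lb1-25 ~]# systemctl start nginx                # lb1 (state MASTER, priority 100) preempts the VIP back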
######################################################
Point the nodes' apiserver address at the VIP
On 31:
[root@k8s-node-1-31 ~]# cd /opt/kubernetes/cfg/
[root@k8s-node-1-31 cfg]# grep -r 10.1.1.28 .
./bootstrap.kubeconfig: server: https://10.1.1.28:6443
./kube-proxy.kubeconfig: server: https://10.1.1.28:6443
./kubelet.kubeconfig: server: https://10.1.1.28:6443
[root@k8s-node-1-31 cfg]# sed -i 's#10.1.1.28#10.1.1.34#g' *
[root@k8s-node-1-31 cfg]# grep -r 10.1.1.28 .
[root@k8s-node-1-31 cfg]# grep -r 10.1.1.34 .
./bootstrap.kubeconfig: server: https://10.1.1.34:6443
./kube-proxy.kubeconfig: server: https://10.1.1.34:6443
./kubelet.kubeconfig: server: https://10.1.1.34:6443
On 32:
[root@k8s-node2-32 ~]# cd /opt/kubernetes/cfg/
[root@k8s-node2-32 cfg]# sed -i 's#10.1.1.28#10.1.1.34#g' *
[root@k8s-node2-32 cfg]# grep -r 10.1.1.34 .
./bootstrap.kubeconfig: server: https://10.1.1.34:6443
./kube-proxy.kubeconfig: server: https://10.1.1.34:6443
./kubelet.kubeconfig: server: https://10.1.1.34:6443
On 33 (after the same sed):
[root@k8s-node-3-33 cfg]# grep -r 10.1.1.34 .
./bootstrap.kubeconfig: server: https://10.1.1.34:6443
./kube-proxy.kubeconfig: server: https://10.1.1.34:6443
./kubelet.kubeconfig: server: https://10.1.1.34:6443
Restart the services on all 3 nodes:
systemctl restart kubelet && systemctl restart kube-proxy
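Finally, confirm the nodes really reach the apiservers through the VIP. The /version endpoint is served to unauthenticated clients by default, so a plain curl is enough (exact JSON is illustrative):
[root@k8s-node-1-31 ~]# curl -k https://10.1.1.34:6443/version
# a JSON response with "gitVersion": "v1.16.0" means the whole chain works:
# node -> VIP -> nginx -> one of the three apiservers
# /var/log/nginx/k8s-access.log on the LB will show the proxied requests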