K8S Cluster Deployment
Host plan
| Host | IP | Role |
|---|---|---|
| vip | 10.167.45.10 | VIP |
| master-01 | 10.167.45.11 | etcd1, master01 |
| master-02 | 10.167.45.12 | etcd2, master02 |
| node-01 | 10.167.45.14 | etcd3, node-01 |
Passwordless SSH login configuration (run on master-01):
ssh-keygen -t rsa
ssh-copy-id -i .ssh/id_rsa.pub root@10.167.45.12
ssh-copy-id -i .ssh/id_rsa.pub root@10.167.45.14
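A quick sanity check that key-based login works (a minimal sketch; the target IPs follow the host plan above):
# Each command should print the remote hostname without prompting for a password
ssh root@10.167.45.12 hostname
ssh root@10.167.45.14 hostname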
Initial setup
# All nodes
###### Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
###### Disable SELinux
setenforce 0
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
###### Disable swap
swapoff -a
yes | cp /etc/fstab /etc/fstab_bak
grep -v swap /etc/fstab_bak > /etc/fstab
# Configure the Docker yum repository
yum install -y yum-utils \
device-mapper-persistent-data \
lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
###### Configure Aliyun base and EPEL yum repositories
yum install wget telnet -y
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
yum clean all
yum makecache
###### Kernel parameters (written to /etc/sysctl.d/k8s.conf)
modprobe br_netfilter
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness=0
EOF
sysctl -p /etc/sysctl.d/k8s.conf
###### Enable IPVS
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
# Install Docker (starting it is covered right after the IPVS tools below)
yum install -y docker-ce-18.09.8 docker-ce-cli-18.09.8 containerd.io
# Install IPVS and NFS userspace tools
yum install -y nfs-utils ipset ipvsadm
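The steps above install Docker but never start it. A minimal sketch to enable and start it (the daemon.json content is an assumption, not from the original: kubeadm setups commonly use the systemd cgroup driver, and kubeadm 1.19 auto-detects Docker's driver):
# Assumption: switch Docker to the systemd cgroup driver; adjust or skip as needed
mkdir -p /etc/docker
cat <<EOF > /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl enable docker
systemctl start docker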
Installation steps
I. Install keepalived and configure the VIP
1、 Install keepalived on both master-01 and master-02
yum install -y keepalived
2、 Configure keepalived
master-01 configuration
vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
vrrp_instance VI_1 {
state MASTER
interface ens33
virtual_router_id 44
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
10.167.45.10
}
}
virtual_server 10.167.45.10 6443 {
delay_loop 6
lb_algo rr
lb_kind DR
protocol TCP
real_server 10.167.45.11 6443 {
weight 3
TCP_CHECK {
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
real_server 10.167.45.12 6443 {
weight 3
TCP_CHECK {
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
}
master-02 configuration
vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
vrrp_instance VI_1 {
state BACKUP
interface ens33
virtual_router_id 44
priority 80
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
10.167.45.10
}
}
virtual_server 10.167.45.10 6443 {
delay_loop 6
lb_algo rr
lb_kind DR
protocol TCP
real_server 10.167.45.11 6443 {
weight 3
TCP_CHECK {
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
real_server 10.167.45.12 6443 {
weight 3
TCP_CHECK {
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
}
3、 Start the service (on both masters)
systemctl start keepalived
systemctl enable keepalived
systemctl status keepalived
4、 Verify
[root@master01 ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 10.167.45.10:6443 rr
# For now only the VIP is listed; once the cluster is up and serving on port 6443, the rr round-robin real-server entries will appear
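To confirm which node currently holds the VIP (a minimal sketch; the interface name ens33 matches the keepalived configs above):
# On the active MASTER (initially master-01) this prints the VIP; on the BACKUP it prints nothing
ip addr show ens33 | grep 10.167.45.10
# Optional rough failover test: stop keepalived on master-01, then re-check on master-02
systemctl stop keepalived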
II. Set up a highly available etcd cluster
1、 Install cfssl on master-01
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
chmod +x cfssl_linux-amd64 cfssljson_linux-amd64
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
2、 Install the etcd binaries
# Create the directory
mkdir -p /data/etcd/bin
# Download
cd /tmp
wget https://storage.googleapis.com/etcd/v3.3.25/etcd-v3.3.25-linux-amd64.tar.gz
tar zxf etcd-v3.3.25-linux-amd64.tar.gz
cd etcd-v3.3.25-linux-amd64
mv etcd etcdctl /data/etcd/bin/
3、 Create the CA, client, server, and peer certificates
etcd is the server and etcdctl is the client; the two communicate over HTTP(S).
CA certificate: a self-signed root certificate used to sign the other certificates
Server certificate: the certificate presented by etcd
Client certificate: the certificate used by clients such as etcdctl
Peer certificate: the certificate used for node-to-node communication
3.1、 Create the directory
mkdir -p /data/etcd/ssl
cd /data/etcd/ssl
3.2、 Create the CA certificate
vim ca-config.json
{
"signing": {
"default": {
"expiry": "438000h"
},
"profiles": {
"server": {
"expiry": "438000h",
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
]
},
"client": {
"expiry": "438000h",
"usages": [
"signing",
"key encipherment",
"client auth"
]
},
"peer": {
"expiry": "438000h",
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
# server auth means a client can use this CA to verify the certificate presented by the server
# client auth means a server can use this CA to verify the certificate presented by the client
vim ca-csr.json
{
"CN": "etcd",
"key": {
"algo": "rsa",
"size": 2048
}
}
# Generate the CA certificate and private key
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
# ls ca*
# ca-config.json ca.csr ca-csr.json ca-key.pem ca.pem
3.3、 Create the client certificate
vim client.json
{
"CN": "client",
"key": {
"algo": "ecdsa",
"size": 256
}
}
# Generate the certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client client.json | cfssljson -bare client -
# ls
# ca-config.json ca.csr ca-csr.json ca-key.pem ca.pem client-key.pem client.pem
3.4、 Generate the server and peer certificates
vim etcd.json
{
"CN": "etcd",
"hosts": [
"10.167.45.11",
"10.167.45.12",
"10.167.45.14"
],
"key": {
"algo": "ecdsa",
"size": 256
},
"names": [
{
"C": "CN",
"L": "BJ",
"ST": "BJ"
}
]
}
# Generate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server etcd.json | cfssljson -bare server
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=peer etcd.json | cfssljson -bare peer
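To confirm the generated certificates carry the expected IP SANs, they can be inspected with openssl (a quick check, not part of the original steps):
# The output should list the three etcd host IPs from etcd.json
openssl x509 -in server.pem -noout -text | grep -A1 'Subject Alternative Name'
openssl x509 -in peer.pem -noout -text | grep -A1 'Subject Alternative Name'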
3.5、 Sync master-01's /data/etcd directory (binaries and certificates) to master-02 and node-01
# Make sure /data exists on the targets first
ssh 10.167.45.12 mkdir -p /data
ssh 10.167.45.14 mkdir -p /data
scp -r /data/etcd 10.167.45.12:/data/
scp -r /data/etcd 10.167.45.14:/data/
4、 systemd unit files
master-01 (10.167.45.11)
vim /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos
[Service]
Type=notify
WorkingDirectory=/data/etcd/
ExecStart=/data/etcd/bin/etcd \
--name=etcd1 \
--cert-file=/data/etcd/ssl/server.pem \
--key-file=/data/etcd/ssl/server-key.pem \
--peer-cert-file=/data/etcd/ssl/peer.pem \
--peer-key-file=/data/etcd/ssl/peer-key.pem \
--trusted-ca-file=/data/etcd/ssl/ca.pem \
--peer-trusted-ca-file=/data/etcd/ssl/ca.pem \
--initial-advertise-peer-urls=https://10.167.45.11:2380 \
--listen-peer-urls=https://10.167.45.11:2380 \
--listen-client-urls=https://10.167.45.11:2379 \
--advertise-client-urls=https://10.167.45.11:2379 \
--initial-cluster-token=etcd-cluster-0 \
--initial-cluster=etcd1=https://10.167.45.11:2380,etcd2=https://10.167.45.12:2380,etcd3=https://10.167.45.14:2380 \
--initial-cluster-state=new \
--data-dir=/data/etcd \
--snapshot-count=50000 \
--auto-compaction-retention=1 \
--max-request-bytes=10485760 \
--quota-backend-bytes=8589934592
Restart=always
RestartSec=15
LimitNOFILE=65536
OOMScoreAdjust=-999
[Install]
WantedBy=multi-user.target
master-02 (10.167.45.12)
vim /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos
[Service]
Type=notify
WorkingDirectory=/data/etcd/
ExecStart=/data/etcd/bin/etcd \
--name=etcd2 \
--cert-file=/data/etcd/ssl/server.pem \
--key-file=/data/etcd/ssl/server-key.pem \
--peer-cert-file=/data/etcd/ssl/peer.pem \
--peer-key-file=/data/etcd/ssl/peer-key.pem \
--trusted-ca-file=/data/etcd/ssl/ca.pem \
--peer-trusted-ca-file=/data/etcd/ssl/ca.pem \
--initial-advertise-peer-urls=https://10.167.45.12:2380 \
--listen-peer-urls=https://10.167.45.12:2380 \
--listen-client-urls=https://10.167.45.12:2379 \
--advertise-client-urls=https://10.167.45.12:2379 \
--initial-cluster-token=etcd-cluster-0 \
--initial-cluster=etcd1=https://10.167.45.11:2380,etcd2=https://10.167.45.12:2380,etcd3=https://10.167.45.14:2380 \
--initial-cluster-state=new \
--data-dir=/data/etcd \
--snapshot-count=50000 \
--auto-compaction-retention=1 \
--max-request-bytes=10485760 \
--quota-backend-bytes=8589934592
Restart=always
RestartSec=15
LimitNOFILE=65536
OOMScoreAdjust=-999
[Install]
WantedBy=multi-user.target
node-01 (10.167.45.14)
vim /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos
[Service]
Type=notify
WorkingDirectory=/data/etcd/
ExecStart=/data/etcd/bin/etcd \
--name=etcd3 \
--cert-file=/data/etcd/ssl/server.pem \
--key-file=/data/etcd/ssl/server-key.pem \
--peer-cert-file=/data/etcd/ssl/peer.pem \
--peer-key-file=/data/etcd/ssl/peer-key.pem \
--trusted-ca-file=/data/etcd/ssl/ca.pem \
--peer-trusted-ca-file=/data/etcd/ssl/ca.pem \
--initial-advertise-peer-urls=https://10.167.45.14:2380 \
--listen-peer-urls=https://10.167.45.14:2380 \
--listen-client-urls=https://10.167.45.14:2379 \
--advertise-client-urls=https://10.167.45.14:2379 \
--initial-cluster-token=etcd-cluster-0 \
--initial-cluster=etcd1=https://10.167.45.11:2380,etcd2=https://10.167.45.12:2380,etcd3=https://10.167.45.14:2380 \
--initial-cluster-state=new \
--data-dir=/data/etcd \
--snapshot-count=50000 \
--auto-compaction-retention=1 \
--max-request-bytes=10485760 \
--quota-backend-bytes=8589934592
Restart=always
RestartSec=15
LimitNOFILE=65536
OOMScoreAdjust=-999
[Install]
WantedBy=multi-user.target
5、 Start etcd
# All three etcd nodes
systemctl daemon-reload
systemctl enable etcd
systemctl start etcd
systemctl status etcd
If the service fails to start with unit-file errors like the following:
Jan 05 10:57:19 master-01 systemd[1]: [/usr/lib/systemd/system/etcd.service:11] Trailing garbage, ignoring.
Jan 05 10:57:19 master-01 systemd[1]: [/usr/lib/systemd/system/etcd.service:12] Unknown lvalue '--cert-file' in section 'Service'
Jan 05 10:57:19 master-01 systemd[1]: [/usr/lib/systemd/system/etcd.service:13] Unknown lvalue '--listen-peer-urls' in section 'Service'
Jan 05 10:57:19 master-01 systemd[1]: [/usr/lib/systemd/system/etcd.service:14] Unknown lvalue '--listen-client-urls' in section 'Service'
Jan 05 10:57:19 master-01 systemd[1]: [/usr/lib/systemd/system/etcd.service:15] Unknown lvalue '--advertise-client-urls' in section 'Service'
Jan 05 10:57:19 master-01 systemd[1]: [/usr/lib/systemd/system/etcd.service:16] Unknown lvalue '--initial-cluster-token' in section 'Service'
Jan 05 10:57:19 master-01 systemd[1]: etcd.service lacks both ExecStart= and ExecStop= setting. Refusing.
the cause is formatting damage in the unit file (typically broken backslash line continuations or stray characters introduced by copy-paste); re-create the file in a plain-text editor.
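Once all three members are up, cluster health can be checked from any etcd node (a minimal sketch using etcdctl's v3 API and the client certificate generated earlier):
ETCDCTL_API=3 /data/etcd/bin/etcdctl \
  --endpoints=https://10.167.45.11:2379,https://10.167.45.12:2379,https://10.167.45.14:2379 \
  --cacert=/data/etcd/ssl/ca.pem \
  --cert=/data/etcd/ssl/client.pem \
  --key=/data/etcd/ssl/client-key.pem \
  endpoint health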
III. Install kubeadm, kubelet, and kubectl
Install kubeadm and kubelet on all nodes. kubectl is optional: you can install it on every machine, or only on master-01.
1、 Add a domestic (Aliyun) mirror repository
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
2、 Install pinned versions
yum install -y kubelet-1.19.2 kubeadm-1.19.2 kubectl-1.19.2
3、 On every node where kubelet is installed, enable it to start at boot
systemctl enable kubelet
IV. Initialize master-01
1、 On master-01, copy the CA and client certificates generated when building the etcd cluster to the locations kubeadm expects, renaming them as follows:
[root@master-01] ~$ mkdir -p /etc/kubernetes/pki/etcd/
# CA certificate of the etcd cluster
[root@master-01] ~$ cp /data/etcd/ssl/ca.pem /etc/kubernetes/pki/etcd/
# Client certificate of the etcd cluster, used by the apiserver to access etcd
[root@master-01] ~$ cp /data/etcd/ssl/client.pem /etc/kubernetes/pki/apiserver-etcd-client.pem
# Client private key of the etcd cluster
[root@master-01] ~$ cp /data/etcd/ssl/client-key.pem /etc/kubernetes/pki/apiserver-etcd-client-key.pem
# Verify the layout
[root@master-01] ~$ tree /etc/kubernetes/pki/
/etc/kubernetes/pki/
├── apiserver-etcd-client-key.pem
├── apiserver-etcd-client.pem
└── etcd
└── ca.pem
1 directory, 3 files
2、 Initialization configuration file
Generate the default configuration file:
kubeadm config print init-defaults > kubeadm-init.yaml
# Edit the file
vim kubeadm-init.yaml
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
token: abcdef.0123456789abcdef
ttl: 24h0m0s
usages:
- signing
- authentication
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 10.167.45.11 # this node's IP
bindPort: 6443
nodeRegistration:
criSocket: /var/run/dockershim.sock
name: master-01 # this node's hostname
taints:
- effect: NoSchedule
key: node-role.kubernetes.io/master
---
apiServer:
timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
type: CoreDNS
etcd:
# local:
#   dataDir: /var/lib/etcd   # replaced by the external (self-managed) etcd cluster below
external:
endpoints:
- https://10.167.45.11:2379
- https://10.167.45.12:2379
- https://10.167.45.14:2379
caFile: /etc/kubernetes/pki/etcd/ca.pem # CA certificate generated when building the etcd cluster
certFile: /etc/kubernetes/pki/apiserver-etcd-client.pem # client certificate generated when building the etcd cluster
keyFile: /etc/kubernetes/pki/apiserver-etcd-client-key.pem # client key generated when building the etcd cluster
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.19.0
controlPlaneEndpoint: 10.167.45.10 # VIP address
networking:
dnsDomain: cluster.local
podSubnet: 10.244.0.0/16
serviceSubnet: 10.96.0.0/12
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "ipvs"
3、 Run the initialization
kubeadm init --config=kubeadm-init.yaml
# On success, the output ends with a join command for worker nodes:
kubeadm join 10.167.45.10:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:ecdce1900bd6fe454523c7138ef27e201ff7b7a160149f9abaaaed64ed1c2ef2
kubeadm token create --print-join-command   # regenerate the join command later
journalctl -xeu kubelet                     # inspect kubelet logs if init fails
4、 Configure kubectl
To manage the cluster with kubectl, set up the following:
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
Test that kubectl works. Note that master-01 showing NotReady is expected at this point, because the flannel network plugin has not been deployed yet:
[root@master-01] # kubectl get node
NAME STATUS ROLES AGE VERSION
master-01 NotReady master 66s v1.19.2
V. Join master-02 to the cluster
1、 First, scp the shared cluster CA certificates generated on master-01 to the other master:
ssh 10.167.45.12 mkdir -p /etc/kubernetes/pki
scp -r /etc/kubernetes/pki/* 10.167.45.12:/etc/kubernetes/pki/
2、 Copy the initialization configuration file to master-02
scp kubeadm-init.yaml 10.167.45.12:/root/
3、 Edit the kubeadm-init.yaml file on master-02
vim kubeadm-init.yaml
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
token: abcdef.0123456789abcdef
ttl: 24h0m0s
usages:
- signing
- authentication
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 10.167.45.12 # this node's IP
bindPort: 6443
nodeRegistration:
criSocket: /var/run/dockershim.sock
name: master-02 # this node's hostname
taints:
- effect: NoSchedule
key: node-role.kubernetes.io/master
---
apiServer:
timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
type: CoreDNS
etcd:
external:
endpoints:
- https://10.167.45.11:2379
- https://10.167.45.12:2379
- https://10.167.45.14:2379
caFile: /etc/kubernetes/pki/etcd/ca.pem
certFile: /etc/kubernetes/pki/apiserver-etcd-client.pem
keyFile: /etc/kubernetes/pki/apiserver-etcd-client-key.pem
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.19.0
controlPlaneEndpoint: 10.167.45.10
networking:
dnsDomain: cluster.local
podSubnet: 10.244.0.0/16
serviceSubnet: 10.96.0.0/12
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "ipvs"
4、 Initialize master-02
kubeadm init --config=kubeadm-init.yaml
# As before, the output includes the worker join command:
kubeadm join 10.167.45.10:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:ecdce1900bd6fe454523c7138ef27e201ff7b7a160149f9abaaaed64ed1c2ef2
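As on master-01, kubectl on master-02 needs a kubeconfig before it can reach the cluster (the same steps as in section IV, repeated here as a sketch):
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
kubectl get nodes   # both masters should now be listed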
VI. Join worker nodes to the cluster
Generate the join command on master-01:
kubeadm token create --print-join-command
# Run the printed command on the worker node (node-01):
kubeadm join 10.167.45.10:6443 --token 7hj6kq.diqh7mbspeffceek --discovery-token-ca-cert-hash sha256:ecdce1900bd6fe454523c7138ef27e201ff7b7a160149f9abaaaed64ed1c2ef2
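Back on master-01, the worker should appear shortly (it will stay NotReady until flannel is installed in the next section):
kubectl get nodes -o wide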
VII. Install the Flannel network plugin
Flannel runs as pods (a DaemonSet).
# On master-01, create flannel.yml with the manifest below, then run: kubectl apply -f flannel.yml
vim flannel.yml
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: psp.flannel.unprivileged
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
privileged: false
volumes:
- configMap
- secret
- emptyDir
- hostPath
allowedHostPaths:
- pathPrefix: "/etc/cni/net.d"
- pathPrefix: "/etc/kube-flannel"
- pathPrefix: "/run/flannel"
readOnlyRootFilesystem: false
# Users and groups
runAsUser:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
fsGroup:
rule: RunAsAny
# Privilege Escalation
allowPrivilegeEscalation: false
defaultAllowPrivilegeEscalation: false
# Capabilities
allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
defaultAddCapabilities: []
requiredDropCapabilities: []
# Host namespaces
hostPID: false
hostIPC: false
hostNetwork: true
hostPorts:
- min: 0
max: 65535
# SELinux
seLinux:
# SELinux is unused in CaaSP
rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
rules:
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-system
labels:
tier: node
app: flannel
data:
cni-conf.json: |
{
"name": "cbr0",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
net-conf.json: |
{
"Network": "10.244.0.0/16",
"Backend": {
"Type": "vxlan"
}
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
hostNetwork: true
priorityClassName: system-node-critical
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni-plugin
image: rancher/mirrored-flannelcni-flannel-cni-plugin:v1.0.0
command:
- cp
args:
- -f
- /flannel
- /opt/cni/bin/flannel
volumeMounts:
- name: cni-plugin
mountPath: /opt/cni/bin
- name: install-cni
image: rancher/mirrored-flannelcni-flannel:v0.16.1
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: rancher/mirrored-flannelcni-flannel:v0.16.1
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN", "NET_RAW"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni-plugin
hostPath:
path: /opt/cni/bin
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
# Check pod status
[root@master ~]# kubectl get pod -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system kube-flannel-ds-amd64-hpvf8 1/1 Running 1 2d11h
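Once a flannel pod is Running on every node, the nodes should flip from NotReady to Ready:
kubectl get nodes   # all nodes should now report Ready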
VIII. Troubleshooting
[root@master-01 ~]# systemctl status etcd -l
● etcd.service - Etcd Server
Loaded: loaded (/usr/lib/systemd/system/etcd.service; enabled; vendor preset: disabled)
Active: activating (auto-restart) (Result: exit-code) since Mon 2022-01-17 08:41:08 CST; 13s ago
Docs: https://github.com/coreos
Process: 4451 ExecStart=/data/etcd/bin/etcd --name=etcd1 --cert-file=/data/etcd/ssl/server.pem --key-file=/data/etcd/ssl/server-key.pem --peer-cert-file=/data/etcd/ssl/peer.pem --peer-key-file=/data/etcd/ssl/peer-key.pem --trusted-ca-file=/data/etcd/ssl/ca.pem --peer-trusted-ca-file=/data/etcd/ssl/ca.pem --initial-advertise-peer-urls=https://10.167.45.11:2380 --listen-peer-urls=https://10.167.45.11:2380 --listen-client-urls=https://10.167.45.11:2379 --advertise-client-urls=https://10.167.45.11:2379 --initial-cluster-token=etcd-cluster-0 --initial-cluster=etcd1=https://10.167.45.11:2380,etcd2=https://10.167.45.12:2380,etcd3=https://10.167.45.14:2380 --initial-cluster-state=new --data-dir=/data/etcd --snapshot-count=50000 --auto-compaction-retention=1 --max-request-bytes=10485760 --quota-backend-bytes=8589934592 (code=exited, status=2)
Main PID: 4451 (code=exited, status=2)
Jan 17 08:41:08 master-01 etcd[4451]: /home/ANT.AMAZON.COM/leegyuho/go/src/github.com/coreos/etcd/etcdmain/etcd.go:181 +0x40
Jan 17 08:41:08 master-01 etcd[4451]: github.com/coreos/etcd/etcdmain.startEtcdOrProxyV2()
Jan 17 08:41:08 master-01 etcd[4451]: /home/ANT.AMAZON.COM/leegyuho/go/src/github.com/coreos/etcd/etcdmain/etcd.go:102 +0x13c5
Jan 17 08:41:08 master-01 etcd[4451]: github.com/coreos/etcd/etcdmain.Main()
Jan 17 08:41:08 master-01 etcd[4451]: /home/ANT.AMAZON.COM/leegyuho/go/src/github.com/coreos/etcd/etcdmain/main.go:46 +0x38
Jan 17 08:41:08 master-01 etcd[4451]: main.main()
Jan 17 08:41:08 master-01 etcd[4451]: /home/ANT.AMAZON.COM/leegyuho/go/src/github.com/coreos/etcd/main.go:28 +0x20
Jan 17 08:41:08 master-01 systemd[1]: Failed to start Etcd Server.
Jan 17 08:41:08 master-01 systemd[1]: Unit etcd.service entered failed state.
Jan 17 08:41:08 master-01 systemd[1]: etcd.service failed.
## Cause
In this lab environment the etcd data files were corrupted, so the data directory is deleted and the cluster re-initialized. In production, restore from an etcd backup instead.
systemctl stop etcd
# Remove the corrupted etcd data (lab only!)
cd /data/etcd/
rm -fr member
# Move the static pod manifests aside
cd /etc/kubernetes/manifests/
mkdir -p /k8s/yaml
mv *.yaml /k8s/yaml
# Remove all containers, stop kubelet, and re-initialize
docker rm -f $(docker ps -aq)
systemctl stop kubelet
kubeadm init --config=kubeadm-init.yaml
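Because production recovery depends on having a backup, taking periodic snapshots is worth scripting (a minimal sketch with etcdctl's v3 API; the /data/backup path is an assumption):
mkdir -p /data/backup
ETCDCTL_API=3 /data/etcd/bin/etcdctl \
  --endpoints=https://10.167.45.11:2379 \
  --cacert=/data/etcd/ssl/ca.pem \
  --cert=/data/etcd/ssl/client.pem \
  --key=/data/etcd/ssl/client-key.pem \
  snapshot save /data/backup/etcd-$(date +%F).db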