Deploying a Kubernetes Cluster on Linux and Extending Certificate Validity
1. Environment Preparation
- IP / hostname requirements
Deploy a cluster with one master and two worker nodes:
IP | Hostname | CPU | Memory |
---|---|---|---|
192.168.18.30 | master | CPU >= 2 cores | Memory >= 2 GB |
192.168.18.31 | node1 | CPU >= 2 cores | Memory >= 2 GB |
192.168.18.32 | node2 | CPU >= 2 cores | Memory >= 2 GB |
- Set a static IP (change IPADDR per machine; nothing else needs to change)
vim /etc/sysconfig/network-scripts/ifcfg-ens33
TYPE="Ethernet"
PROXY_METHOD="none"
BROWSER_ONLY="no"
BOOTPROTO="static"
DEFROUTE="yes"
IPV4_FAILURE_FATAL="no"
IPV6INIT="yes"
IPV6_AUTOCONF="no"
IPV6_DEFROUTE="no"
IPV6_FAILURE_FATAL="no"
IPV6_ADDR_GEN_MODE="stable-privacy"
NAME="ens33"
UUID="e5106d7d-0a53-4c93-a910-db64a032a127"
DEVICE="ens33"
ONBOOT="yes"
IPADDR=192.168.18.30
NETMASK=255.255.255.0
GATEWAY=192.168.18.1
DNS1=8.8.8.8
- Restart the network
systemctl restart network
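To confirm the new address took effect, a quick check (assuming the ens33 interface and gateway from the config above):
ip addr show ens33        # should show 192.168.18.x/24
ping -c 3 192.168.18.1    # the gateway should answer before continuing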
2. CentOS 7 Setup (run on every machine)
- Install the Aliyun mirror source
yum -y install wget
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
yum clean all && yum makecache
- Update CentOS
yum update -y
- Install lrzsz (rz/sz file transfer tools)
yum -y install lrzsz
- Permanently disable SELinux
vim /etc/sysconfig/selinux    # set SELINUX=disabled
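A non-interactive sketch of the same edit, which also drops enforcement for the current boot:
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/sysconfig/selinux
setenforce 0    # permissive until the next reboot; disabled after it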
- Disable the firewall
firewall-cmd --state && systemctl stop firewalld && systemctl disable firewalld
- Synchronize the system clock
yum install -y ntp
ntpdate cn.pool.ntp.org
- Permanently disable swap
swapoff -a
vim /etc/fstab    # comment out the swap line
sysctl -p && free -m
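A non-interactive sketch of the fstab edit above:
sed -ri 's/^([^#].*[[:space:]]swap[[:space:]].*)$/#\1/' /etc/fstab
free -m    # the Swap row should read all zeros after swapoff -a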
- Add hostname entries
vim /etc/hosts
192.168.18.30 master
192.168.18.31 node1
192.168.18.32 node2
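Or append all three entries at once:
cat >> /etc/hosts <<EOF
192.168.18.30 master
192.168.18.31 node1
192.168.18.32 node2
EOF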
- Set the hostname (use the matching name on each machine)
hostnamectl set-hostname master
- Adjust kernel parameters
vi /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness=0
Run the following to apply the changes:
modprobe br_netfilter
sysctl -p /etc/sysctl.d/k8s.conf
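modprobe only loads the module for the current boot; a sketch that makes br_netfilter load automatically on every boot via systemd:
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf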
- Prerequisites for enabling IPVS in kube-proxy
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
Install the ipset package:
yum install -y ipset
To make the IPVS proxy rules easy to inspect, also install the ipvsadm management tool:
yum install -y ipvsadm
Confirm that the default policy of the FORWARD chain in the iptables filter table is ACCEPT:
iptables -nvL
If it is not ACCEPT, change it (Docker may reset this policy to DROP when it restarts, so re-check after reboots):
iptables -P FORWARD ACCEPT
3. Install Docker (run on every machine)
- Install dependencies
yum install -y yum-utils device-mapper-persistent-data lvm2
- Add the package source
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum list docker-ce --showduplicates | sort -r
- Install docker-ce
yum clean all && yum makecache fast
yum -y install docker-ce-18.09.8-3.el7
- Start and enable the service
systemctl start docker && systemctl enable docker
- Create the daemon.json file
mkdir -p /etc/docker
vim /etc/docker/daemon.json
{
"exec-opts": ["native.cgroupdriver=systemd"],
"registry-mirrors": [ "https://registry.docker-cn.com"],
"graph": "/home/lib/docker"
}
- Restart Docker
systemctl daemon-reload && systemctl restart docker
- Check the installed version
docker version
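To confirm the daemon.json settings took effect (the exact docker info labels may vary slightly between Docker versions):
docker info | grep -i 'cgroup driver'    # expect: systemd
docker info | grep -i 'root dir'         # expect: /home/lib/docker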
4. Install kubelet, kubeadm, kubectl (run on every machine)
- Install from the Aliyun mirror
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum install -y kubelet-1.16.3 kubeadm-1.16.3 kubectl-1.16.3
systemctl daemon-reload && systemctl enable kubelet.service
- Write a script that pulls the images from the Aliyun registry and retags them as k8s.gcr.io
vim pullimages.sh
#!/bin/bash
KUBE_VERSION=v1.16.3
KUBE_PAUSE_VERSION=3.1
ETCD_VERSION=3.3.15-0
DNS_VERSION=1.6.2
username=registry.cn-hangzhou.aliyuncs.com/google_containers
images=(kube-proxy:${KUBE_VERSION}
kube-scheduler:${KUBE_VERSION}
kube-controller-manager:${KUBE_VERSION}
kube-apiserver:${KUBE_VERSION}
pause:${KUBE_PAUSE_VERSION}
etcd:${ETCD_VERSION}
coredns:${DNS_VERSION} )
for image in ${images[@]}
do
docker pull ${username}/${image}
docker tag ${username}/${image} k8s.gcr.io/${image}
#docker tag ${username}/${image} gcr.io/google_containers/${image}
docker rmi ${username}/${image}
done
- Make the script executable and run it
chmod +x pullimages.sh && ./pullimages.sh
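After the script finishes, the retagged images should be present locally:
docker images | grep k8s.gcr.io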
5. Create the cluster with kubeadm (run on the master node only)
- Reset the cluster environment (only needed when the cluster is broken; skip in the normal case)
kubeadm reset
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
ipvsadm --clear
rm -f $HOME/.kube/config
- Initialize
Only the master node IP needs to be adjusted:
kubeadm init --kubernetes-version=1.16.3 --apiserver-advertise-address=192.168.18.30 --pod-network-cidr=10.244.0.0/16
- Then run, in order:
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
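With the kubeconfig in place, kubectl should now reach the API server:
kubectl cluster-info
kubectl get nodes    # the master reports NotReady until flannel is installed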
- Install flannel
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
sed -i 's@quay.io/coreos@quay.azk8s.cn/coreos@g' kube-flannel.yml
kubectl apply -f kube-flannel.yml
systemctl enable kubelet && systemctl start kubelet
- Check whether the pods are running; STATUS Running means healthy
kubectl get pod -o wide -n kube-system
Alternatively, install flannel offline: save the manifest below as kube-flannel.yml and apply it:
kubectl apply -f kube-flannel.yml
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: psp.flannel.unprivileged
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
privileged: false
volumes:
- configMap
- secret
- emptyDir
- hostPath
allowedHostPaths:
- pathPrefix: "/etc/cni/net.d"
- pathPrefix: "/etc/kube-flannel"
- pathPrefix: "/run/flannel"
readOnlyRootFilesystem: false
# Users and groups
runAsUser:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
fsGroup:
rule: RunAsAny
# Privilege Escalation
allowPrivilegeEscalation: false
defaultAllowPrivilegeEscalation: false
# Capabilities
allowedCapabilities: ['NET_ADMIN']
defaultAddCapabilities: []
requiredDropCapabilities: []
# Host namespaces
hostPID: false
hostIPC: false
hostNetwork: true
hostPorts:
- min: 0
max: 65535
# SELinux
seLinux:
# SELinux is unused in CaaSP
rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
rules:
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-system
labels:
tier: node
app: flannel
data:
cni-conf.json: |
{
"name": "cbr0",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
net-conf.json: |
{
"Network": "10.244.0.0/16",
"Backend": {
"Type": "vxlan"
}
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds-amd64
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: In
values:
- linux
- key: beta.kubernetes.io/arch
operator: In
values:
- amd64
hostNetwork: true
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.11.0-amd64
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.11.0-amd64
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds-arm64
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: In
values:
- linux
- key: beta.kubernetes.io/arch
operator: In
values:
- arm64
hostNetwork: true
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.11.0-arm64
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.11.0-arm64
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds-arm
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: In
values:
- linux
- key: beta.kubernetes.io/arch
operator: In
values:
- arm
hostNetwork: true
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.11.0-arm
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.11.0-arm
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds-ppc64le
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: In
values:
- linux
- key: beta.kubernetes.io/arch
operator: In
values:
- ppc64le
hostNetwork: true
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.11.0-ppc64le
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.11.0-ppc64le
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds-s390x
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: In
values:
- linux
- key: beta.kubernetes.io/arch
operator: In
values:
- s390x
hostNetwork: true
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.11.0-s390x
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.11.0-s390x
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
- Check node status (all nodes report Ready once flannel is up)
kubectl get nodes
6. Join the nodes (run on worker nodes only)
- Reset the cluster (only needed when the cluster is broken; skip in the normal case)
kubeadm reset
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
ipvsadm --clear
rm -f $HOME/.kube/config
- Join the cluster
Run the kubeadm join command printed by kubeadm init on the master; it must be executed on every worker node.
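The command has the general shape below; <token> and <hash> are placeholders for the values from your own kubeadm init output (kubeadm token create --print-join-command on the master regenerates the full command if it was lost):
kubeadm join 192.168.18.30:6443 --token <token> \
    --discovery-token-ca-cert-hash sha256:<hash>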
7. Switch kube-proxy from iptables to IPVS (run on the master only)
- mode is empty by default, which means iptables mode; change it to ipvs
scheduler is also empty by default, which means round-robin load balancing
kubectl edit configmap kube-proxy -n kube-system
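In the editor, set mode: "ipvs" inside config.conf. A non-interactive sketch of the same change (it assumes mode is still the empty string ""):
kubectl -n kube-system get configmap kube-proxy -o yaml \
  | sed 's/mode: ""/mode: "ipvs"/' \
  | kubectl apply -f -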
- Delete all kube-proxy pods so they restart in the new mode
# list the pods in the kube-system namespace
kubectl get pod -n kube-system
# delete each kube-proxy pod (xxx is the pod name)
kubectl delete pod xxx -n kube-system
kubectl get pod -n kube-system
The replacement pods are recreated automatically by the DaemonSet and do not need to be deleted again.
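A one-liner that deletes every kube-proxy pod at once (kubeadm labels them k8s-app=kube-proxy), then verifies that IPVS rules exist:
kubectl -n kube-system delete pod -l k8s-app=kube-proxy
ipvsadm -Ln    # should now list virtual and real servers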
8. Extend the k8s certificate validity (run on the master)
kubeadm issues certificates valid for one year by default, which is impractical for production, so the validity needs to be extended.
- Write a script that extends the validity to 10 years
vim update-kubeadm-cert.sh
#!/bin/bash
# Script adapted from https://github.com/yuyicai/update-kube-cert
set -o errexit
set -o pipefail
# set -o xtrace
log::err() {
printf "[$(date +'%Y-%m-%dT%H:%M:%S.%N%z')]: \033[31mERROR: \033[0m$@\n"
}
log::info() {
printf "[$(date +'%Y-%m-%dT%H:%M:%S.%N%z')]: \033[32mINFO: \033[0m$@\n"
}
log::warning() {
printf "[$(date +'%Y-%m-%dT%H:%M:%S.%N%z')]: \033[33mWARNING: \033[0m$@\n"
}
check_file() {
if [[ ! -r ${1} ]]; then
log::err "can not find ${1}"
exit 1
fi
}
# get x509v3 subject alternative name from the old certificate
cert::get_subject_alt_name() {
local cert=${1}.crt
check_file "${cert}"
local alt_name=$(openssl x509 -text -noout -in ${cert} | grep -A1 'Alternative' | tail -n1 | sed 's/[[:space:]]*Address//g')
printf "${alt_name}\n"
}
# get subject from the old certificate
cert::get_subj() {
local cert=${1}.crt
check_file "${cert}"
local subj=$(openssl x509 -text -noout -in ${cert} | grep "Subject:" | sed 's/Subject:/\//g;s/\,/\//;s/[[:space:]]//g')
printf "${subj}\n"
}
cert::backup_file() {
local file=${1}
if [[ ! -e ${file}.old-$(date +%Y%m%d) ]]; then
cp -rp ${file} ${file}.old-$(date +%Y%m%d)
log::info "backup ${file} to ${file}.old-$(date +%Y%m%d)"
else
log::warning "does not backup, ${file}.old-$(date +%Y%m%d) already exists"
fi
}
# generate certificate with type client, server or peer
# Args:
# $1 (the name of certificate)
# $2 (the type of certificate, must be one of client, server, peer)
# $3 (the subject of certificates)
# $4 (the validity of certificates) (days)
# $5 (the x509v3 subject alternative name of certificate when the type of certificate is server or peer)
cert::gen_cert() {
local cert_name=${1}
local cert_type=${2}
local subj=${3}
local cert_days=${4}
local alt_name=${5}
local cert=${cert_name}.crt
local key=${cert_name}.key
local csr=${cert_name}.csr
local csr_conf="distinguished_name = dn\n[dn]\n[v3_ext]\nkeyUsage = critical, digitalSignature, keyEncipherment\n"
check_file "${key}"
check_file "${cert}"
# backup certificate when certificate not in ${kubeconf_arr[@]}
# kubeconf_arr=("controller-manager.crt" "scheduler.crt" "admin.crt" "kubelet.crt")
# if [[ ! "${kubeconf_arr[@]}" =~ "${cert##*/}" ]]; then
# cert::backup_file "${cert}"
# fi
case "${cert_type}" in
client)
openssl req -new -key ${key} -subj "${subj}" -reqexts v3_ext \
-config <(printf "${csr_conf} extendedKeyUsage = clientAuth\n") -out ${csr}
openssl x509 -in ${csr} -req -CA ${CA_CERT} -CAkey ${CA_KEY} -CAcreateserial -extensions v3_ext \
-extfile <(printf "${csr_conf} extendedKeyUsage = clientAuth\n") -days ${cert_days} -out ${cert}
log::info "generated ${cert}"
;;
server)
openssl req -new -key ${key} -subj "${subj}" -reqexts v3_ext \
-config <(printf "${csr_conf} extendedKeyUsage = serverAuth\nsubjectAltName = ${alt_name}\n") -out ${csr}
openssl x509 -in ${csr} -req -CA ${CA_CERT} -CAkey ${CA_KEY} -CAcreateserial -extensions v3_ext \
-extfile <(printf "${csr_conf} extendedKeyUsage = serverAuth\nsubjectAltName = ${alt_name}\n") -days ${cert_days} -out ${cert}
log::info "generated ${cert}"
;;
peer)
openssl req -new -key ${key} -subj "${subj}" -reqexts v3_ext \
-config <(printf "${csr_conf} extendedKeyUsage = serverAuth, clientAuth\nsubjectAltName = ${alt_name}\n") -out ${csr}
openssl x509 -in ${csr} -req -CA ${CA_CERT} -CAkey ${CA_KEY} -CAcreateserial -extensions v3_ext \
-extfile <(printf "${csr_conf} extendedKeyUsage = serverAuth, clientAuth\nsubjectAltName = ${alt_name}\n") -days ${cert_days} -out ${cert}
log::info "generated ${cert}"
;;
*)
log::err "unknow, unsupported etcd certs type: ${cert_type}, supported type: client, server, peer"
exit 1
esac
rm -f ${csr}
}
cert::update_kubeconf() {
local cert_name=${1}
local kubeconf_file=${cert_name}.conf
local cert=${cert_name}.crt
local key=${cert_name}.key
# generate certificate
check_file ${kubeconf_file}
# get the key from the old kubeconf
grep "client-key-data" ${kubeconf_file} | awk {'print$2'} | base64 -d > ${key}
# get the old certificate from the old kubeconf
grep "client-certificate-data" ${kubeconf_file} | awk {'print$2'} | base64 -d > ${cert}
# get subject from the old certificate
local subj=$(cert::get_subj ${cert_name})
cert::gen_cert "${cert_name}" "client" "${subj}" "${CAER_DAYS}"
# get certificate base64 code
local cert_base64=$(base64 -w 0 ${cert})
# backup kubeconf
# cert::backup_file "${kubeconf_file}"
# set certificate base64 code to kubeconf
sed -i 's/client-certificate-data:.*/client-certificate-data: '${cert_base64}'/g' ${kubeconf_file}
log::info "generated new ${kubeconf_file}"
rm -f ${cert}
rm -f ${key}
# set config for kubectl
if [[ ${cert_name##*/} == "admin" ]]; then
mkdir -p ~/.kube
cp -fp ${kubeconf_file} ~/.kube/config
log::info "copy the admin.conf to ~/.kube/config for kubectl"
fi
}
cert::update_etcd_cert() {
PKI_PATH=${KUBE_PATH}/pki/etcd
CA_CERT=${PKI_PATH}/ca.crt
CA_KEY=${PKI_PATH}/ca.key
check_file "${CA_CERT}"
check_file "${CA_KEY}"
# generate etcd server certificate
# /etc/kubernetes/pki/etcd/server
CART_NAME=${PKI_PATH}/server
subject_alt_name=$(cert::get_subject_alt_name ${CART_NAME})
cert::gen_cert "${CART_NAME}" "peer" "/CN=etcd-server" "${CAER_DAYS}" "${subject_alt_name}"
# generate etcd peer certificate
# /etc/kubernetes/pki/etcd/peer
CART_NAME=${PKI_PATH}/peer
subject_alt_name=$(cert::get_subject_alt_name ${CART_NAME})
cert::gen_cert "${CART_NAME}" "peer" "/CN=etcd-peer" "${CAER_DAYS}" "${subject_alt_name}"
# generate etcd healthcheck-client certificate
# /etc/kubernetes/pki/etcd/healthcheck-client
CART_NAME=${PKI_PATH}/healthcheck-client
cert::gen_cert "${CART_NAME}" "client" "/O=system:masters/CN=kube-etcd-healthcheck-client" "${CAER_DAYS}"
# generate apiserver-etcd-client certificate
# /etc/kubernetes/pki/apiserver-etcd-client
check_file "${CA_CERT}"
check_file "${CA_KEY}"
PKI_PATH=${KUBE_PATH}/pki
CART_NAME=${PKI_PATH}/apiserver-etcd-client
cert::gen_cert "${CART_NAME}" "client" "/O=system:masters/CN=kube-apiserver-etcd-client" "${CAER_DAYS}"
# restart etcd
docker ps | awk '/k8s_etcd/{print$1}' | xargs -r -I '{}' docker restart {} || true
log::info "restarted etcd"
}
cert::update_master_cert() {
PKI_PATH=${KUBE_PATH}/pki
CA_CERT=${PKI_PATH}/ca.crt
CA_KEY=${PKI_PATH}/ca.key
check_file "${CA_CERT}"
check_file "${CA_KEY}"
# generate apiserver server certificate
# /etc/kubernetes/pki/apiserver
CART_NAME=${PKI_PATH}/apiserver
subject_alt_name=$(cert::get_subject_alt_name ${CART_NAME})
cert::gen_cert "${CART_NAME}" "server" "/CN=kube-apiserver" "${CAER_DAYS}" "${subject_alt_name}"
# generate apiserver-kubelet-client certificate
# /etc/kubernetes/pki/apiserver-kubelet-client
CART_NAME=${PKI_PATH}/apiserver-kubelet-client
cert::gen_cert "${CART_NAME}" "client" "/O=system:masters/CN=kube-apiserver-kubelet-client" "${CAER_DAYS}"
# generate kubeconf for controller-manager,scheduler,kubectl and kubelet
# /etc/kubernetes/controller-manager,scheduler,admin,kubelet.conf
cert::update_kubeconf "${KUBE_PATH}/controller-manager"
cert::update_kubeconf "${KUBE_PATH}/scheduler"
cert::update_kubeconf "${KUBE_PATH}/admin"
# check kubelet.conf
# https://github.com/kubernetes/kubeadm/issues/1753
set +e
grep kubelet-client-current.pem /etc/kubernetes/kubelet.conf > /dev/null 2>&1
kubelet_cert_auto_update=$?
set -e
if [[ "$kubelet_cert_auto_update" == "0" ]]; then
log::warning "does not need to update kubelet.conf"
else
cert::update_kubeconf "${KUBE_PATH}/kubelet"
fi
# generate front-proxy-client certificate
# use front-proxy-client ca
CA_CERT=${PKI_PATH}/front-proxy-ca.crt
CA_KEY=${PKI_PATH}/front-proxy-ca.key
check_file "${CA_CERT}"
check_file "${CA_KEY}"
CART_NAME=${PKI_PATH}/front-proxy-client
cert::gen_cert "${CART_NAME}" "client" "/CN=front-proxy-client" "${CAER_DAYS}"
# restart apiserver, controller-manager, scheduler and kubelet
docker ps | awk '/k8s_kube-apiserver/{print$1}' | xargs -r -I '{}' docker restart {} || true
log::info "restarted kube-apiserver"
docker ps | awk '/k8s_kube-controller-manager/{print$1}' | xargs -r -I '{}' docker restart {} || true
log::info "restarted kube-controller-manager"
docker ps | awk '/k8s_kube-scheduler/{print$1}' | xargs -r -I '{}' docker restart {} || true
log::info "restarted kube-scheduler"
systemctl restart kubelet
log::info "restarted kubelet"
}
main() {
local node_type=$1
KUBE_PATH=/etc/kubernetes
CAER_DAYS=3650
# backup $KUBE_PATH to $KUBE_PATH.old-$(date +%Y%m%d)
cert::backup_file "${KUBE_PATH}"
case ${node_type} in
etcd)
# update etcd certificates
cert::update_etcd_cert
;;
master)
# update master certificates and kubeconf
cert::update_master_cert
;;
all)
# update etcd certificates
cert::update_etcd_cert
# update master certificates and kubeconf
cert::update_master_cert
;;
*)
log::err "unknow, unsupported certs type: ${cert_type}, supported type: all, etcd, master"
printf "Documentation: https://github.com/yuyicai/update-kube-cert
example:
'\033[32m./update-kubeadm-cert.sh all\033[0m' update all etcd certificates, master certificates and kubeconf
/etc/kubernetes
├── admin.conf
├── controller-manager.conf
├── scheduler.conf
├── kubelet.conf
└── pki
├── apiserver.crt
├── apiserver-etcd-client.crt
├── apiserver-kubelet-client.crt
├── front-proxy-client.crt
└── etcd
├── healthcheck-client.crt
├── peer.crt
└── server.crt
'\033[32m./update-kubeadm-cert.sh etcd\033[0m' update only etcd certificates
/etc/kubernetes
└── pki
├── apiserver-etcd-client.crt
└── etcd
├── healthcheck-client.crt
├── peer.crt
└── server.crt
'\033[32m./update-kubeadm-cert.sh master\033[0m' update only master certificates and kubeconf
/etc/kubernetes
├── admin.conf
├── controller-manager.conf
├── scheduler.conf
├── kubelet.conf
└── pki
├── apiserver.crt
├── apiserver-kubelet-client.crt
└── front-proxy-client.crt
"
exit 1
esac
}
main "$@"
- Make it executable and run it
chmod +x update-kubeadm-cert.sh
./update-kubeadm-cert.sh all
- Check that the certificates were reissued correctly and the pods are healthy
kubectl get pods -n kube-system
- Restart kubelet
systemctl restart kubelet
- Verify that the certificate validity was extended to 10 years
openssl x509 -in /etc/kubernetes/pki/apiserver.crt -noout -text | grep Not
- Check that the other certificates were extended as well
kubeadm alpha certs check-expiration