Installing Kubernetes 1.15.2 from binaries in HA mode

Environment preparation

##/etc/hosts
192.168.48.101 master01
192.168.48.102 master02
192.168.48.103 master03
192.168.48.201 node01
192.168.48.202 node02
## keepalived VIP
192.168.48.66
IP              | Hostname  | CPU | Memory
192.168.48.101  | master01  | 2   | 4G
192.168.48.102  | master02  | 2   | 4G
192.168.48.103  | master03  | 2   | 4G
192.168.48.201  | node01    | 2   | 4G
192.168.48.202  | node02    | 2   | 4G
Software versions
kubernetes  1.15.2
docker-ce   19.03
calico      3.8
etcd        3.3.13
CNI         0.8.1
coredns     1.4.0

Run on all nodes

Disable SELinux and firewalld on all nodes

systemctl disable firewalld.service
systemctl stop firewalld.service

setenforce 0
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config

Upgrade the kernel on all nodes; 4.19 or newer is recommended

Upgrade from 3.10 to 5.2.2
## Import the ELRepo yum repository used for the kernel upgrade
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm

## List available versions: kernel-lt is the long-term stable branch, kernel-ml is the mainline (latest) branch
[root@master01 ~]# yum --disablerepo="*" --enablerepo="elrepo-kernel" list available
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
 * elrepo-kernel: mirrors.tuna.tsinghua.edu.cn
Available Packages
kernel-lt-devel.x86_64                                            4.4.186-1.el7.elrepo                                 elrepo-kernel
kernel-lt-doc.noarch                                              4.4.186-1.el7.elrepo                                 elrepo-kernel
kernel-lt-headers.x86_64                                          4.4.186-1.el7.elrepo                                 elrepo-kernel
kernel-lt-tools.x86_64                                            4.4.186-1.el7.elrepo                                 elrepo-kernel
kernel-lt-tools-libs.x86_64                                       4.4.186-1.el7.elrepo                                 elrepo-kernel
kernel-lt-tools-libs-devel.x86_64                                 4.4.186-1.el7.elrepo                                 elrepo-kernel
kernel-ml.x86_64                                                  5.2.2-1.el7.elrepo                                   elrepo-kernel
kernel-ml-devel.x86_64                                            5.2.2-1.el7.elrepo                                   elrepo-kernel
kernel-ml-doc.noarch                                              5.2.2-1.el7.elrepo                                   elrepo-kernel
kernel-ml-headers.x86_64                                          5.2.2-1.el7.elrepo                                   elrepo-kernel
kernel-ml-tools.x86_64                                            5.2.2-1.el7.elrepo                                   elrepo-kernel
kernel-ml-tools-libs.x86_64                                       5.2.2-1.el7.elrepo                                   elrepo-kernel
kernel-ml-tools-libs-devel.x86_64                                 5.2.2-1.el7.elrepo                                   elrepo-kernel
perf.x86_64                                                       5.2.2-1.el7.elrepo                                   elrepo-kernel
python-perf.x86_64                                                5.2.2-1.el7.elrepo                                   elrepo-kernel

## Install kernel-ml
yum -y update
yum --enablerepo=elrepo-kernel install kernel-ml kernel-ml-devel -y

Configure grub2
## List all kernels available on the system
awk -F\' '$1=="menuentry " {print i++ " : " $2}' /etc/grub2.cfg

## Set the new kernel as the default grub2 entry
[root@master01 ~]# vim /etc/default/grub 
GRUB_TIMEOUT=5
GRUB_DISTRIBUTOR="$(sed 's, release .*$,,g' /etc/system-release)"
GRUB_DEFAULT=0
GRUB_DISABLE_SUBMENU=true
GRUB_TERMINAL_OUTPUT="console"
GRUB_CMDLINE_LINUX="rd.lvm.lv=centos/root rhgb quiet"
GRUB_DISABLE_RECOVERY="true"
## Generate the grub configuration file
grub2-mkconfig -o /boot/grub2/grub.cfg
## Reboot
reboot
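
After the reboot, it is worth confirming that the new kernel is actually running (a quick check; the exact version string depends on the kernel-ml build installed above):
uname -r
## should print something like 5.2.2-1.el7.elrepo.x86_64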

Disable swap on all nodes

cat >> /etc/sysctl.conf << EOF
vm.swappiness = 0
EOF

sysctl -p

swapoff -a

sed -ri 's/.*swap.*/#&/' /etc/fstab
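
To confirm swap is fully off (a quick sanity check; swapon prints nothing when no swap device is active):
swapon --show
free -h | grep -i swap
## the Swap line should show 0B total / 0B used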

Enable IP forwarding and related kernel parameters on all nodes

cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_keepalive_probes = 10
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
net.ipv4.neigh.default.gc_stale_time = 120
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_announce = 2
net.ipv4.ip_forward = 1
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 1024
net.ipv4.tcp_synack_retries = 2
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.netfilter.nf_conntrack_max = 2310720
fs.inotify.max_user_watches=89100
fs.may_detach_mounts = 1
fs.file-max = 52706963
fs.nr_open = 52706963
net.bridge.bridge-nf-call-arptables = 1
vm.swappiness = 0
vm.overcommit_memory=1
vm.panic_on_oom=0
EOF

sysctl --system
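
Note: the net.bridge.bridge-nf-call-* keys only exist once the br_netfilter module is loaded (the IPVS step below also loads it persistently). If sysctl complains that those keys are unknown, loading the module first and re-applying the settings is enough; a minimal sketch:
modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf   ## also load it on boot
sysctl --system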

Enable IPVS on all nodes

yum install ipvsadm ipset jq sysstat conntrack libseccomp conntrack-tools socat -y
vim /etc/sysconfig/modules/ipvs.modules

#!/bin/bash
module=(ip_vs
        ip_vs_rr
        ip_vs_wrr
        ip_vs_sh
        ip_vs_lc
        br_netfilter
        nf_conntrack)
for kernel_module in ${module[@]};do
    /sbin/modinfo -F filename $kernel_module |& grep -qv ERROR && echo $kernel_module >> /etc/modules-load.d/ipvs.conf || :
done


ipvs_modules_dir="/usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs"
for i in `ls $ipvs_modules_dir | sed  -r 's#(.*).ko#\1#'`; do
    /sbin/modinfo -F filename $i  &> /dev/null
    if [ $? -eq 0 ]; then
        /sbin/modprobe $i
    fi
done

chmod 755 /etc/sysconfig/modules/ipvs.modules
source /etc/sysconfig/modules/ipvs.modules
[root@master ~]# lsmod | grep ip_vs
ip_vs_wlc              16384  0 
ip_vs_sed              16384  0 
ip_vs_pe_sip           16384  0 
nf_conntrack_sip       32768  1 ip_vs_pe_sip
ip_vs_ovf              16384  0 
ip_vs_nq               16384  0 
ip_vs_mh               16384  0 
ip_vs_lblcr            16384  0 
ip_vs_lblc             16384  0 
ip_vs_ftp              16384  0 
nf_nat                 40960  1 ip_vs_ftp
ip_vs_fo               16384  0 
ip_vs_dh               16384  0 
ip_vs_lc               16384  0 
ip_vs_sh               16384  0 
ip_vs_wrr              16384  0 
ip_vs_rr               16384  0 
ip_vs                 151552  30 ip_vs_wlc,ip_vs_rr,ip_vs_dh,ip_vs_lblcr,ip_vs_sh,ip_vs_ovf,ip_vs_fo,ip_vs_nq,ip_vs_lblc,ip_vs_pe_sip,ip_vs_wrr,ip_vs_lc,ip_vs_mh,ip_vs_sed,ip_vs_ftp
nf_conntrack          139264  3 nf_nat,nf_conntrack_sip,ip_vs
nf_defrag_ipv6         24576  2 nf_conntrack,ip_vs
libcrc32c              16384  4 nf_conntrack,nf_nat,xfs,ip_vs

Install the EPEL repository on all nodes

cat > /etc/yum.repos.d/epel.repo << 'EOF'
[epel]
name=Extra Packages for Enterprise Linux 7 - $basearch
baseurl=https://mirrors.tuna.tsinghua.edu.cn/epel/7/$basearch
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=$basearch
failovermethod=priority
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7

[epel-debuginfo]
name=Extra Packages for Enterprise Linux 7 - $basearch - Debug
baseurl=https://mirrors.tuna.tsinghua.edu.cn/epel/7/$basearch/debug
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-debug-7&arch=$basearch
failovermethod=priority
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
gpgcheck=1

[epel-source]
name=Extra Packages for Enterprise Linux 7 - $basearch - Source
baseurl=https://mirrors.tuna.tsinghua.edu.cn/epel/7/SRPMS
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-source-7&arch=$basearch
failovermethod=priority
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
gpgcheck=1
EOF

Install Docker on all nodes

cd /etc/yum.repos.d/ 
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum -y install docker-ce 
Configure Docker
mkdir /etc/docker
touch /etc/docker/daemon.json

cat > /etc/docker/daemon.json <<EOF
{
    "log-driver": "json-file",
    "exec-opts": ["native.cgroupdriver=cgroupfs"],
    "log-opts": {
    "max-size": "100m",
    "max-file": "3"
    },
    "live-restore": true,
    "max-concurrent-downloads": 10,
    "max-concurrent-uploads": 10,
    "registry-mirrors": ["https://2lefsjdg.mirror.aliyuncs.com"],
    "storage-driver": "overlay2",
    "storage-opts": [
    "overlay2.override_kernel_check=true"
    ]
}
EOF
Start Docker
systemctl daemon-reload 
systemctl start docker
systemctl enable docker.service 
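
Before pulling images, it may help to confirm that Docker picked up daemon.json; the cgroup and storage drivers should match the configuration above:
docker info | grep -iE 'cgroup driver|storage driver'
## Cgroup Driver: cgroupfs
## Storage Driver: overlay2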
Pull pause:3.1
The official image is k8s.gcr.io/pause:3.1; pull it from a mirror and re-tag it:
[root@master01 ~]# docker pull gcr.azk8s.cn/google-containers/pause:3.1
3.1: Pulling from google-containers/pause
67ddbfb20a22: Already exists 
Digest: sha256:f78411e19d84a252e53bff71a4407a5686c46983a2c2eeed83929b888179acea
Status: Downloaded newer image for gcr.azk8s.cn/google-containers/pause:3.1
gcr.azk8s.cn/google-containers/pause:3.1

[root@master01 ~]# docker tag gcr.azk8s.cn/google-containers/pause:3.1  k8s.gcr.io/pause:3.1

Install the CNI plugins on all nodes

Link: https://pan.baidu.com/s/1XugTCo-a_Uicbu_V29gd5w  extraction code: sdwi

https://github.com/containernetworking/plugins/releases
wget https://github.com/containernetworking/plugins/releases/download/v0.8.1/cni-plugins-linux-amd64-v0.8.1.tgz
mkdir -p /opt/cni/bin
tar -xf cni-plugins-linux-amd64-v0.8.1.tgz  -C /opt/cni/bin/
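
A quick listing confirms the plugins landed in the expected directory (the exact plugin set depends on the release, so treat the output as indicative):
ls /opt/cni/bin
## bandwidth  bridge  dhcp  firewall  flannel  host-device  host-local  ipvlan  loopback  macvlan  portmap  ptp  sbr  static  tuning  vlan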

Creating the certificates

All of the following operations are performed on master01

Create the certificates on master01, then distribute them to the other nodes

Set the cluster variables on master01

[root@master01 ~]# vim /etc/profile.d/k8s_env.sh

# declare the cluster membership arrays
declare -A MasterArray otherMaster NodeArray AllNode Other

# master nodes
MasterArray=(['master01']=192.168.48.101 ['master02']=192.168.48.102 ['master03']=192.168.48.103)

# the other master nodes
otherMaster=(['master02']=192.168.48.102 ['master03']=192.168.48.103)

# worker nodes
NodeArray=(['node01']=192.168.48.201 ['node02']=192.168.48.202)

# all nodes
AllNode=(['master01']=192.168.48.101 ['master02']=192.168.48.102 ['master03']=192.168.48.103 ['node01']=192.168.48.201 ['node02']=192.168.48.202)

# all nodes except master01
Other=(['master02']=192.168.48.102 ['master03']=192.168.48.103 ['node01']=192.168.48.201 ['node02']=192.168.48.202 )

# keepalived VIP
export  VIP=192.168.48.66

[ "${#MasterArray[@]}" -eq 1 ]  && export VIP=${MasterArray[@]} || export API_PORT=8443
export KUBE_APISERVER=https://${VIP}:${API_PORT:=6443}

# Kubernetes version to install
export KUBE_VERSION=v1.15.2

# network interface name
export interface=eth0

#cni
export CNI_VERSION=v0.8.1

#etcd
export ETCD_version=v3.3.13
[root@master01 ~]# source /etc/profile.d/k8s_env.sh
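
The scp/ssh loops used throughout the rest of this guide assume master01 can log in to every node as root without a password. If that is not already set up, a minimal sketch (run once on master01, reusing the AllNode array defined above, assuming no key exists yet) would be:
ssh-keygen -t rsa -b 2048 -N '' -f ~/.ssh/id_rsa
for NODE in "${!AllNode[@]}"; do
    echo "--- $NODE ${AllNode[$NODE]} ---"
    ## enter each node's root password once
    ssh-copy-id root@${AllNode[$NODE]}
done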

Download the server binary package for the master nodes

Link: https://pan.baidu.com/s/1ZlFkJDT05xakejKeOUSu4w  extraction code: 4jte

wget https://dl.k8s.io/v1.15.2/kubernetes-server-linux-amd64.tar.gz
tar xvf kubernetes-server-linux-amd64.tar.gz
 mv kubernetes/server/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy}  /usr/local/bin/

Distribute the master binaries

for name in ${!MasterArray[@]};do 
      echo "--- $name ${MasterArray[$name]} ---"
    scp /usr/local/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy} ${MasterArray[$name]}:/usr/local/bin/ 
done

Prepare the openssl certificate configuration file

[root@master01 ~]# mkdir -p /etc/kubernetes/pki/etcd
[root@master01 ~]# cd /etc/kubernetes/pki/
 [root@master01 pki]# ll
total 0
drwxr-xr-x 2 root root 6 Aug 12 19:05 etcd

vim /etc/kubernetes/pki/openssl.cnf

[ req ]
default_bits = 2048
default_md = sha256
distinguished_name = req_distinguished_name

[req_distinguished_name]

[ v3_ca ]
basicConstraints = critical, CA:TRUE
keyUsage = critical, digitalSignature, keyEncipherment, keyCertSign

[ v3_req_server ]
basicConstraints = CA:FALSE
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth

[ v3_req_client ]
basicConstraints = CA:FALSE
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth

[ v3_req_apiserver ]
basicConstraints = CA:FALSE
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth
subjectAltName = @alt_names_cluster

[ v3_req_etcd ]
basicConstraints = CA:FALSE
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth, clientAuth
subjectAltName = @alt_names_etcd

[ alt_names_cluster ]
DNS.1 = kubernetes
DNS.2 = kubernetes.default
DNS.3 = kubernetes.default.svc
DNS.4 = kubernetes.default.svc.cluster.local
DNS.5 = localhost
IP.1 = 10.96.0.1  ##kubernetes.default.svc.cluster.local
IP.2 = 127.0.0.1  ##localhost
IP.3 = 192.168.48.66    ##vip
IP.4 = 192.168.48.101  ##master01
IP.5 = 192.168.48.102  ##master02
IP.6 = 192.168.48.103  ##master03
IP.7 = 192.168.48.201  ##node01
IP.8 = 192.168.48.202  ##node02

[ alt_names_etcd ]
DNS.1 = localhost
IP.1 = 127.0.0.1
IP.2 = 192.168.48.101   ##master01
IP.3 = 192.168.48.102   ##master02 
IP.4 = 192.168.48.103   ##master03 

Generate the CA certificates

path                    | Default CN                 | description
ca.crt,key              | kubernetes-ca              | Kubernetes general CA
etcd/ca.crt,key         | etcd-ca                    | For all etcd-related functions
front-proxy-ca.crt,key  | kubernetes-front-proxy-ca  | For the front-end proxy

All certificates must be created under /etc/kubernetes/pki/

kubernetes-ca
[root@master01 pki]# pwd
/etc/kubernetes/pki
[root@master01 pki]# openssl genrsa -out ca.key 2048
Generating RSA private key, 2048 bit long modulus
...........................+++
..................................................................................................................+++
e is 65537 (0x10001)
[root@master01 pki]# openssl req -x509 -new -nodes -key ca.key -config openssl.cnf -subj "/CN=kubernetes-ca" -extensions v3_ca -out ca.crt -days 10000
etcd-ca
openssl genrsa -out etcd/ca.key 2048
openssl req -x509 -new -nodes -key etcd/ca.key -config openssl.cnf -subj "/CN=etcd-ca" -extensions v3_ca -out etcd/ca.crt -days 10000
front-proxy-ca
openssl genrsa -out front-proxy-ca.key 2048
openssl req -x509 -new -nodes -key front-proxy-ca.key -config openssl.cnf -subj "/CN=kubernetes-front-proxy-ca" -extensions v3_ca -out front-proxy-ca.crt -days 10000
[root@master01 pki]# tree /etc/kubernetes/pki/
/etc/kubernetes/pki/
├── ca.crt
├── ca.key
├── etcd
│   ├── ca.crt
│   └── ca.key
├── front-proxy-ca.crt
├── front-proxy-ca.key
└── openssl.cnf

Generate all component certificates

Default CN                     | Parent CA                  | O (in Subject)  | kind
kube-etcd                      | etcd-ca                    |                 | server, client
kube-etcd-peer                 | etcd-ca                    |                 | server, client
kube-etcd-healthcheck-client   | etcd-ca                    |                 | client
kube-apiserver-etcd-client     | etcd-ca                    | system:masters  | client
kube-apiserver                 | kubernetes-ca              |                 | server
kube-apiserver-kubelet-client  | kubernetes-ca              | system:masters  | client
front-proxy-client             | kubernetes-front-proxy-ca  |                 | client

Default CN                     | recommended key path         | recommended cert path         | command         | key argument             | cert argument
etcd-ca                        |                              | etcd/ca.crt                   | kube-apiserver  |                          | --etcd-cafile
etcd-client                    | apiserver-etcd-client.key    | apiserver-etcd-client.crt     | kube-apiserver  | --etcd-keyfile           | --etcd-certfile
kubernetes-ca                  |                              | ca.crt                        | kube-apiserver  |                          | --client-ca-file
kube-apiserver                 | apiserver.key                | apiserver.crt                 | kube-apiserver  | --tls-private-key-file   | --tls-cert-file
apiserver-kubelet-client       |                              | apiserver-kubelet-client.crt  | kube-apiserver  |                          | --kubelet-client-certificate
front-proxy-ca                 |                              | front-proxy-ca.crt            | kube-apiserver  |                          | --requestheader-client-ca-file
front-proxy-client             | front-proxy-client.key       | front-proxy-client.crt        | kube-apiserver  | --proxy-client-key-file  | --proxy-client-cert-file
etcd-ca                        |                              | etcd/ca.crt                   | etcd            |                          | --trusted-ca-file, --peer-trusted-ca-file
kube-etcd                      | etcd/server.key              | etcd/server.crt               | etcd            | --key-file               | --cert-file
kube-etcd-peer                 | etcd/peer.key                | etcd/peer.crt                 | etcd            | --peer-key-file          | --peer-cert-file
etcd-ca                        |                              | etcd/ca.crt                   | etcdctl         |                          | --cacert
kube-etcd-healthcheck-client   | etcd/healthcheck-client.key  | etcd/healthcheck-client.crt   | etcdctl         | --key                    | --cert

All certificates must be created under /etc/kubernetes/pki/

apiserver-etcd-client
openssl genrsa -out apiserver-etcd-client.key 2048
openssl req -new -key apiserver-etcd-client.key -subj "/CN=apiserver-etcd-client/O=system:masters" -out apiserver-etcd-client.csr
openssl x509 -in apiserver-etcd-client.csr -req -CA etcd/ca.crt -CAkey etcd/ca.key -CAcreateserial -extensions v3_req_etcd -extfile openssl.cnf -out apiserver-etcd-client.crt -days 10000
kube-etcd
openssl genrsa -out etcd/server.key 2048
openssl req -new -key etcd/server.key -subj "/CN=etcd-server" -out etcd/server.csr
openssl x509 -in etcd/server.csr -req -CA etcd/ca.crt -CAkey etcd/ca.key -CAcreateserial -extensions v3_req_etcd -extfile openssl.cnf -out etcd/server.crt -days 10000

kube-etcd-peer
openssl genrsa -out etcd/peer.key 2048
openssl req -new -key etcd/peer.key -subj "/CN=etcd-peer" -out etcd/peer.csr
openssl x509 -in etcd/peer.csr -req -CA etcd/ca.crt -CAkey etcd/ca.key -CAcreateserial -extensions v3_req_etcd -extfile openssl.cnf -out etcd/peer.crt -days 10000

kube-etcd-healthcheck-client
openssl genrsa -out etcd/healthcheck-client.key 2048
openssl req -new -key etcd/healthcheck-client.key -subj "/CN=etcd-client" -out etcd/healthcheck-client.csr
openssl x509 -in etcd/healthcheck-client.csr -req -CA etcd/ca.crt -CAkey etcd/ca.key -CAcreateserial -extensions v3_req_etcd -extfile openssl.cnf -out etcd/healthcheck-client.crt -days 10000
kube-apiserver
openssl genrsa -out apiserver.key 2048
openssl req -new -key apiserver.key -subj "/CN=kube-apiserver" -config openssl.cnf -out apiserver.csr
openssl x509 -req -in apiserver.csr -CA ca.crt -CAkey ca.key -CAcreateserial -days 10000 -extensions v3_req_apiserver -extfile openssl.cnf -out apiserver.crt

apiserver-kubelet-client
openssl genrsa -out  apiserver-kubelet-client.key 2048
openssl req -new -key apiserver-kubelet-client.key -subj "/CN=apiserver-kubelet-client/O=system:masters" -out apiserver-kubelet-client.csr
openssl x509 -req -in apiserver-kubelet-client.csr -CA ca.crt -CAkey ca.key -CAcreateserial -days 10000 -extensions v3_req_client -extfile openssl.cnf -out apiserver-kubelet-client.crt

front-proxy-client
openssl genrsa -out  front-proxy-client.key 2048
openssl req -new -key front-proxy-client.key -subj "/CN=front-proxy-client" -out front-proxy-client.csr
openssl x509 -req -in front-proxy-client.csr -CA front-proxy-ca.crt -CAkey front-proxy-ca.key -CAcreateserial -days 10000 -extensions v3_req_client -extfile openssl.cnf -out front-proxy-client.crt

kube-scheduler
openssl genrsa -out  kube-scheduler.key 2048
openssl req -new -key kube-scheduler.key -subj "/CN=system:kube-scheduler" -out kube-scheduler.csr
openssl x509 -req -in kube-scheduler.csr -CA ca.crt -CAkey ca.key -CAcreateserial -days 10000 -extensions v3_req_client -extfile openssl.cnf -out kube-scheduler.crt

sa.pub sa.key
openssl genrsa -out  sa.key 2048
openssl ecparam -name secp521r1 -genkey -noout -out sa.key
openssl ec -in sa.key -outform PEM -pubout -out sa.pub
openssl req -new -sha256 -key sa.key -subj "/CN=system:kube-controller-manager" -out sa.csr
openssl x509 -req -in sa.csr -CA ca.crt -CAkey ca.key -CAcreateserial -days 10000 -extensions v3_req_client -extfile openssl.cnf -out sa.crt

admin
openssl genrsa -out  admin.key 2048
openssl req -new -key admin.key -subj "/CN=kubernetes-admin/O=system:masters" -out admin.csr
openssl x509 -req -in admin.csr -CA ca.crt -CAkey ca.key -CAcreateserial -days 10000 -extensions v3_req_client -extfile openssl.cnf -out admin.crt
[root@master01 pki]# tree
.
├── admin.crt
├── admin.csr
├── admin.key
├── apiserver.crt
├── apiserver.csr
├── apiserver-etcd-client.crt
├── apiserver-etcd-client.csr
├── apiserver-etcd-client.key
├── apiserver.key
├── apiserver-kubelet-client.crt
├── apiserver-kubelet-client.csr
├── apiserver-kubelet-client.key
├── ca.crt
├── ca.key
├── ca.srl
├── etcd
│   ├── ca.crt
│   ├── ca.key
│   ├── ca.srl
│   ├── healthcheck-client.crt
│   ├── healthcheck-client.csr
│   ├── healthcheck-client.key
│   ├── peer.crt
│   ├── peer.csr
│   ├── peer.key
│   ├── server.crt
│   ├── server.csr
│   └── server.key
├── front-proxy-ca.crt
├── front-proxy-ca.key
├── front-proxy-ca.srl
├── front-proxy-client.crt
├── front-proxy-client.csr
├── front-proxy-client.key
├── kube-scheduler.crt
├── kube-scheduler.csr
├── kube-scheduler.key
├── openssl.cnf
├── sa.crt
├── sa.csr
├── sa.key
└── sa.pub

1 directory, 41 files
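
Optionally, the freshly issued certificates can be sanity-checked against their CAs before anything is distributed (run from /etc/kubernetes/pki; each file should report OK):
openssl verify -CAfile ca.crt apiserver.crt apiserver-kubelet-client.crt kube-scheduler.crt sa.crt admin.crt
openssl verify -CAfile etcd/ca.crt etcd/server.crt etcd/peer.crt etcd/healthcheck-client.crt apiserver-etcd-client.crt
openssl verify -CAfile front-proxy-ca.crt front-proxy-client.crt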

Generate each component's kubeconfig from the certificates

All of the following operations are performed on master01

filename                       | credential name             | Default CN                      | O (in Subject)
admin.kubeconfig               | default-admin               | kubernetes-admin                | system:masters
controller-manager.kubeconfig  | default-controller-manager  | system:kube-controller-manager  |
scheduler.kubeconfig           | default-manager             | system:kube-scheduler           |

kube-controller-manager

Variables
[root@master01 kubernetes]# pwd
/etc/kubernetes
[root@master01 kubernetes]# CLUSTER_NAME="kubernetes"
[root@master01 kubernetes]# KUBE_USER="system:kube-controller-manager"
[root@master01 kubernetes]# KUBE_CERT="sa"
[root@master01 kubernetes]# KUBE_CONFIG="controller-manager.kubeconfig"
Set the cluster parameters
kubectl config set-cluster ${CLUSTER_NAME} \
  --certificate-authority=/etc/kubernetes/pki/ca.crt \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=/etc/kubernetes/${KUBE_CONFIG}
Set the client authentication parameters
kubectl config set-credentials ${KUBE_USER} \
  --client-certificate=/etc/kubernetes/pki/${KUBE_CERT}.crt \
  --client-key=/etc/kubernetes/pki/${KUBE_CERT}.key \
  --embed-certs=true \
  --kubeconfig=/etc/kubernetes/${KUBE_CONFIG}
Set the context parameters
kubectl config set-context ${KUBE_USER}@${CLUSTER_NAME} \
  --cluster=${CLUSTER_NAME} \
  --user=${KUBE_USER} \
  --kubeconfig=/etc/kubernetes/${KUBE_CONFIG}

Set the current context
[root@master01 kubernetes]# kubectl config use-context ${KUBE_USER}@${CLUSTER_NAME} --kubeconfig=/etc/kubernetes/${KUBE_CONFIG}
Switched to context "system:kube-controller-manager@kubernetes".

View the generated config file
[root@master01 kubernetes]# kubectl config view --kubeconfig=/etc/kubernetes/${KUBE_CONFIG}
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: DATA+OMITTED
    server: https://192.168.48.66:8443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: system:kube-controller-manager
  name: system:kube-controller-manager@kubernetes
current-context: system:kube-controller-manager@kubernetes
kind: Config
preferences: {}
users:
- name: system:kube-controller-manager
  user:
    client-certificate-data: REDACTED
    client-key-data: REDACTED

kube-scheduler

Variables
[root@master01 kubernetes]# pwd
/etc/kubernetes
[root@master01 kubernetes]# CLUSTER_NAME="kubernetes"
[root@master01 kubernetes]# KUBE_USER="system:kube-scheduler"
[root@master01 kubernetes]# KUBE_CERT="kube-scheduler"
[root@master01 kubernetes]# KUBE_CONFIG="scheduler.kubeconfig"

Set the cluster parameters
kubectl config set-cluster ${CLUSTER_NAME} \
  --certificate-authority=/etc/kubernetes/pki/ca.crt \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=/etc/kubernetes/${KUBE_CONFIG}

Set the client authentication parameters

kubectl config set-credentials ${KUBE_USER} \
  --client-certificate=/etc/kubernetes/pki/${KUBE_CERT}.crt \
  --client-key=/etc/kubernetes/pki/${KUBE_CERT}.key \
  --embed-certs=true \
  --kubeconfig=/etc/kubernetes/${KUBE_CONFIG}
Set the context parameters
kubectl config set-context ${KUBE_USER}@${CLUSTER_NAME} \
  --cluster=${CLUSTER_NAME} \
  --user=${KUBE_USER} \
  --kubeconfig=/etc/kubernetes/${KUBE_CONFIG}

Set the current context
[root@master01 kubernetes]# kubectl config use-context ${KUBE_USER}@${CLUSTER_NAME} --kubeconfig=/etc/kubernetes/${KUBE_CONFIG}
Switched to context "system:kube-scheduler@kubernetes".

View the generated config file
[root@master01 kubernetes]# kubectl config view --kubeconfig=/etc/kubernetes/${KUBE_CONFIG}
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: DATA+OMITTED
    server: https://192.168.48.66:8443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: system:kube-scheduler
  name: system:kube-scheduler@kubernetes
current-context: system:kube-scheduler@kubernetes
kind: Config
preferences: {}
users:
- name: system:kube-scheduler
  user:
    client-certificate-data: REDACTED
    client-key-data: REDACTED

admin(kubectl)

Variables
[root@master01 kubernetes]# CLUSTER_NAME="kubernetes"
[root@master01 kubernetes]# KUBE_USER="kubernetes-admin"
[root@master01 kubernetes]# KUBE_CERT="admin"
[root@master01 kubernetes]# KUBE_CONFIG="admin.kubeconfig"
Set the cluster parameters
kubectl config set-cluster ${CLUSTER_NAME} \
  --certificate-authority=/etc/kubernetes/pki/ca.crt \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=/etc/kubernetes/${KUBE_CONFIG}

Set the client authentication parameters
kubectl config set-credentials ${KUBE_USER} \
  --client-certificate=/etc/kubernetes/pki/${KUBE_CERT}.crt \
  --client-key=/etc/kubernetes/pki/${KUBE_CERT}.key \
  --embed-certs=true \
  --kubeconfig=/etc/kubernetes/${KUBE_CONFIG}
Set the context parameters
kubectl config set-context ${KUBE_USER}@${CLUSTER_NAME} \
  --cluster=${CLUSTER_NAME} \
  --user=${KUBE_USER} \
  --kubeconfig=/etc/kubernetes/${KUBE_CONFIG}
Set the current context
[root@master01 kubernetes]# kubectl config use-context ${KUBE_USER}@${CLUSTER_NAME} --kubeconfig=/etc/kubernetes/${KUBE_CONFIG}
Switched to context "kubernetes-admin@kubernetes".

View the generated config file
[root@master01 kubernetes]# kubectl config view --kubeconfig=/etc/kubernetes/${KUBE_CONFIG}
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: DATA+OMITTED
    server: https://192.168.48.66:8443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubernetes-admin
  name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
preferences: {}
users:
- name: kubernetes-admin
  user:
    client-certificate-data: REDACTED
    client-key-data: REDACTED

Distribute the certificates

All of the following operations are performed on master01

From master01, distribute the kubeconfig files and certificates to the other master nodes

for NODE in "${!otherMaster[@]}"; do
    echo "--- $NODE ${otherMaster[$NODE]} ---"
    scp -r /etc/kubernetes ${otherMaster[$NODE]}:/etc
done

Configure etcd

All of the following operations are performed on master01

Download etcd

wget https://github.com/etcd-io/etcd/releases/download/${ETCD_version}/etcd-${ETCD_version}-linux-amd64.tar.gz

Download link
Link: https://pan.baidu.com/s/1PqCNA9vDCD7Q7e6wfWbjsg  extraction code: 64gf

[root@master01 ~]# tar -zxvf etcd-${ETCD_version}-linux-amd64.tar.gz --strip-components=1 -C /usr/local/bin etcd-${ETCD_version}-linux-amd64/etcd{,ctl}
etcd-v3.3.13-linux-amd64/etcdctl
etcd-v3.3.13-linux-amd64/etcd

Distribute the etcd binaries from master01 to the other masters

for NODE in "${!otherMaster[@]}"; do
    echo "--- $NODE ${otherMaster[$NODE]} ---"
    scp /usr/local/bin/etcd* ${otherMaster[$NODE]}:/usr/local/bin/
done

Create the etcd configuration template

[root@master01 ~]# mkdir /etc/etcd/ -p
[root@master01 ~]# mkdir /var/lib/etcd/wal  -p
[root@master01 ~]# vim  /etc/etcd/config.yml
name: '{HOSTNAME}'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://{PUBLIC_IP}:2380'
listen-client-urls: 'https://{PUBLIC_IP}:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://{PUBLIC_IP}:2380'
advertise-client-urls: 'https://{PUBLIC_IP}:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: '{{ etcd_initial_cluster }}'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  ca-file: '/etc/kubernetes/pki/etcd/ca.crt'
  cert-file: '/etc/kubernetes/pki/etcd/server.crt'
  key-file: '/etc/kubernetes/pki/etcd/server.key'
  client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/ca.crt'
  auto-tls: true
peer-transport-security:
  ca-file: '/etc/kubernetes/pki/etcd/ca.crt'
  cert-file: '/etc/kubernetes/pki/etcd/peer.crt'
  key-file: '/etc/kubernetes/pki/etcd/peer.key'
  peer-client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/ca.crt'
  auto-tls: true
debug: false
log-package-levels:
log-output: default
force-new-cluster: false
[root@master01 ~]# etcd_servers=$( xargs -n1<<<${MasterArray[@]} | sort | sed 's#^#https://#;s#$#:2379#;$s#\n##' | paste -d, -s - )
[root@master01 ~]# etcd_initial_cluster=$( for i in ${!MasterArray[@]};do  echo $i=https://${MasterArray[$i]}:2380; done | sort | paste -d, -s - )
[root@master01 ~]# sed -ri "/initial-cluster:/s#'.+'#'${etcd_initial_cluster}'#" /etc/etcd/config.yml

Edit the systemd unit file

[root@master01 ~]# vim /usr/lib/systemd/system/etcd.service

[Unit]
Description=Etcd Service
Documentation=https://coreos.com/etcd/docs/latest/
After=network.target

[Service]
Type=notify
ExecStart=/usr/local/bin/etcd --config-file=/etc/etcd/etcd.config.yml
Restart=on-failure
RestartSec=10
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
Alias=etcd3.service

Distribute the etcd configuration and systemd unit files to all master nodes

for NODE in "${!MasterArray[@]}"; do
    echo "--- $NODE ${MasterArray[$NODE]} ---"
    ssh ${MasterArray[$NODE]} "mkdir -p /etc/etcd /var/lib/etcd"
    scp /usr/lib/systemd/system/etcd.service ${MasterArray[$NODE]}:/usr/lib/systemd/system/etcd.service
    scp /etc/etcd/config.yml ${MasterArray[$NODE]}:/etc/etcd/etcd.config.yml
    ssh ${MasterArray[$NODE]} "sed -i "s/{HOSTNAME}/$NODE/g" /etc/etcd/etcd.config.yml"
    ssh ${MasterArray[$NODE]} "sed -i "s/{PUBLIC_IP}/${MasterArray[$NODE]}/g" /etc/etcd/etcd.config.yml"
    ssh ${MasterArray[$NODE]} 'systemctl daemon-reload'
done

Start etcd on all masters

for NODE in "${!MasterArray[@]}"; do
    echo "--- $NODE ${MasterArray[$NODE]} ---"
    ssh ${MasterArray[$NODE]} 'systemctl enable --now etcd' 
done
wait

Check etcd health

etcdctl \
  --cert-file /etc/kubernetes/pki/etcd/healthcheck-client.crt \
  --key-file /etc/kubernetes/pki/etcd/healthcheck-client.key \
  --ca-file /etc/kubernetes/pki/etcd/ca.crt \
   --endpoints $etcd_servers cluster-health
  
... output:
member cf07d604d88be6a is healthy: got healthy result from https://192.168.48.101:2379
member 6c8995c4a94f5a29 is healthy: got healthy result from https://192.168.48.103:2379
member 8cf70d11a9c8d0c5 is healthy: got healthy result from https://192.168.48.102:2379
cluster is healthy
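
The same check can be run through the v3 API, whose etcdctl flags are named differently (this assumes the same healthcheck-client certificate and the $etcd_servers variable defined above):
ETCDCTL_API=3 etcdctl \
  --cacert /etc/kubernetes/pki/etcd/ca.crt \
  --cert /etc/kubernetes/pki/etcd/healthcheck-client.crt \
  --key /etc/kubernetes/pki/etcd/healthcheck-client.key \
  --endpoints $etcd_servers endpoint health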

HA(haproxy+keepalived)

All of the following operations are performed on master01

Install haproxy and keepalived on all master nodes

for NODE in "${!MasterArray[@]}"; do
    echo "--- $NODE ${MasterArray[$NODE]} ---"
    ssh ${MasterArray[$NODE]} 'yum install haproxy keepalived -y' 
done
wait

Configure haproxy

[root@master01 ~]# vim /etc/haproxy/haproxy.cfg

global
  maxconn  2000
  ulimit-n  16384
  log  127.0.0.1 local0 err
  stats timeout 30s

defaults
  log global
  mode  http
  option  httplog
  timeout connect 5000
  timeout client  50000
  timeout server  50000
  timeout http-request 15s
  timeout http-keep-alive 15s

frontend monitor-in
  bind *:33305
  mode http
  option httplog
  monitor-uri /monitor

listen stats
  bind    *:8006
  mode    http
  stats   enable
  stats   hide-version
  stats   uri       /stats
  stats   refresh   30s
  stats   realm     Haproxy\ Statistics
  stats   auth      admin:admin

frontend k8s-api
  bind 0.0.0.0:8443
  bind 127.0.0.1:8443
  mode tcp
  option tcplog
  tcp-request inspect-delay 5s
  default_backend k8s-api

backend k8s-api
  mode tcp
  option tcplog
  option tcp-check
  balance roundrobin
  default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100

Configure keepalived

vim /etc/keepalived/keepalived.conf

global_defs {
    enable_script_security
}

vrrp_script haproxy-check {
    user root
    script "/bin/bash /etc/keepalived/check_haproxy.sh"
    interval 3
    weight -2
    fall 10
    rise 2
}

vrrp_instance haproxy-vip {
    state BACKUP
    priority 101
    interface {{ interface }}
    virtual_router_id 47
    advert_int 3

    unicast_peer {
    }

    virtual_ipaddress {
        {{ VIP }}
    }

    track_script {
        haproxy-check
    }
}
keepalived health-check script
[root@master01 ~]# vim /etc/keepalived/check_haproxy.sh

#!/bin/bash
VIRTUAL_IP={{ VIP }}

errorExit() {
    echo "*** $*" 1>&2
    exit 1
}

if ip addr | grep -q $VIRTUAL_IP ; then
    curl -s --max-time 2 --insecure https://${VIRTUAL_IP}:8443/ -o /dev/null || errorExit "Error GET https://${VIRTUAL_IP}:8443/"
fi

Fill in the templates, then distribute the configuration to the master nodes

[root@master01 ~]# sed -i '$r '<(paste <( seq -f'  server k8s-api-%g'  ${#MasterArray[@]} ) <( xargs -n1<<<${MasterArray[@]} | sort | sed 's#$#:6443  check#')) /etc/haproxy/haproxy.cfg 
[root@master01 ~]# sed -ri "s#\{\{ VIP \}\}#${VIP}#" /etc/keepalived/*
[root@master01 ~]# sed -ri "s#\{\{ interface \}\}#${interface}#" /etc/keepalived/keepalived.conf
[root@master01 ~]# sed -i '/unicast_peer/r '<(xargs -n1<<<${MasterArray[@]} | sort | sed 's#^#\t#') /etc/keepalived/keepalived.conf
for NODE in "${!MasterArray[@]}"; do
    echo "--- $NODE ${MasterArray[$NODE]} ---"
    scp -r /etc/haproxy/ ${MasterArray[$NODE]}:/etc
    scp -r /etc/keepalived/ ${MasterArray[$NODE]}:/etc
    ssh ${MasterArray[$NODE]} 'systemctl enable --now haproxy keepalived'
done
Restart haproxy and keepalived
for NODE in "${!MasterArray[@]}"; do
    echo "--- $NODE ${MasterArray[$NODE]} ---"
    ssh ${MasterArray[$NODE]}  'systemctl restart haproxy keepalived'
done
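
At this point the VIP should be held by one of the masters and haproxy should be listening on 8443 on all of them. A quick check (kube-apiserver is not running yet, so only the VIP and the listener can be verified here; the VIP only appears on the master that currently owns it):
ip addr show ${interface} | grep ${VIP}
ss -tlnp | grep haproxy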

master

Configure kube-apiserver.service

[root@master01 ~]# mkdir -p /etc/kubernetes/manifests /var/lib/kubelet /var/log/kubernetes
vim /usr/lib/systemd/system/kube-apiserver.service

[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-apiserver \
  --authorization-mode=Node,RBAC \
  --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeClaimResize,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,Priority,PodPreset \
  --advertise-address={{ NODE_IP }} \
  --bind-address={{ NODE_IP }}  \
  --insecure-port=0 \
  --secure-port=6443 \
  --allow-privileged=true \
  --apiserver-count={{ master_count }} \
  --audit-log-maxage=30 \
  --audit-log-maxbackup=3 \
  --audit-log-maxsize=100 \
  --audit-log-path=/var/log/audit.log \
  --enable-swagger-ui=true \
  --storage-backend=etcd3 \
  --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt \
  --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt \
  --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key \
  --etcd-servers={{ etcd_servers }} \
  --event-ttl=1h \
  --enable-bootstrap-token-auth \
  --client-ca-file=/etc/kubernetes/pki/ca.crt \
  --kubelet-https \
  --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt \
  --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key \
  --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
  --runtime-config=api/all,settings.k8s.io/v1alpha1=true \
  --service-cluster-ip-range=10.96.0.0/12 \
  --service-node-port-range=30000-32767 \
  --service-account-key-file=/etc/kubernetes/pki/sa.pub \
  --tls-cert-file=/etc/kubernetes/pki/apiserver.crt \
  --tls-private-key-file=/etc/kubernetes/pki/apiserver.key \
  --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt \
  --requestheader-username-headers=X-Remote-User \
  --requestheader-group-headers=X-Remote-Group \
  --requestheader-allowed-names=front-proxy-client \
  --requestheader-extra-headers-prefix=X-Remote-Extra- \
  --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt \
  --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key \
  --feature-gates=PodShareProcessNamespace=true \
  --v=2

Restart=on-failure
RestartSec=10s
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target

Configure kube-controller-manager.service

vim /usr/lib/systemd/system/kube-controller-manager.service

[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-controller-manager \
  --allocate-node-cidrs=true \
  --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \
  --authentication-kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \
  --authorization-kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \
  --client-ca-file=/etc/kubernetes/pki/ca.crt \
  --cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt \
  --cluster-signing-key-file=/etc/kubernetes/pki/ca.key \
  --bind-address=127.0.0.1 \
  --leader-elect=true \
  --cluster-cidr=10.244.0.0/16 \
  --service-cluster-ip-range=10.96.0.0/12 \
  --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt \
  --service-account-private-key-file=/etc/kubernetes/pki/sa.key \
  --root-ca-file=/etc/kubernetes/pki/ca.crt \
  --use-service-account-credentials=true \
  --controllers=*,bootstrapsigner,tokencleaner \
  --experimental-cluster-signing-duration=86700h \
  --feature-gates=RotateKubeletClientCertificate=true \
  --v=2

Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target

Configure kube-scheduler.service

vim /usr/lib/systemd/system/kube-scheduler.service

[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-scheduler \
  --leader-elect=true \
  --kubeconfig=/etc/kubernetes/scheduler.kubeconfig \
  --address=127.0.0.1 \
  --v=2
Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target

Configure and distribute

[root@master01 ~]# etcd_servers=$( xargs -n1<<<${MasterArray[@]} | sort | sed 's#^#https://#;s#$#:2379#;$s#\n##' | paste -d, -s - )
[root@master01 ~]# sed -ri '/--etcd-servers/s#=.+#='"$etcd_servers"' \\#' /usr/lib/systemd/system/kube-apiserver.service
[root@master01 ~]# sed -ri '/apiserver-count/s#=[^\]+#='"${#MasterArray[@]}"' #' /usr/lib/systemd/system/kube-apiserver.service

for NODE in "${!MasterArray[@]}"; do
    echo "--- $NODE ${MasterArray[$NODE]} ---"
    ssh ${MasterArray[$NODE]} 'mkdir -p /etc/kubernetes/manifests /var/lib/kubelet /var/log/kubernetes'
    scp /usr/lib/systemd/system/kube-*.service ${MasterArray[$NODE]}:/usr/lib/systemd/system/
done
for NODE in "${!MasterArray[@]}"; do
    echo "--- $NODE ${MasterArray[$NODE]} ---"
    ssh ${MasterArray[$NODE]} "sed -ri '/bind-address/s#=[^\]+#=${MasterArray[$NODE]} #' /usr/lib/systemd/system/kube-apiserver.service && sed -ri '/--advertise-address/s#=[^\]+#=${MasterArray[$NODE]} #' /usr/lib/systemd/system/kube-apiserver.service"
done

Start the master components and set up kubectl completion

for NODE in "${!MasterArray[@]}"; do
    echo "--- $NODE ${MasterArray[$NODE]} ---"
    ssh ${MasterArray[$NODE]} "systemctl enable --now  kube-apiserver kube-controller-manager kube-scheduler;
    mkdir -p ~/.kube/;
    cp /etc/kubernetes/admin.kubeconfig ~/.kube/config;
    yum -y install bash-comp*;
    source <(kubectl completion bash);
    echo 'source <(kubectl completion bash)' >> ~/.bashrc"
done

Verify the masters

[root@master01 ~]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok                  
controller-manager   Healthy   ok                  
etcd-1               Healthy   {"health":"true"}   
etcd-2               Healthy   {"health":"true"}   
etcd-0               Healthy   {"health":"true"}   

Configure bootstrap

First, on master01, create variables to generate the BOOTSTRAP_TOKEN and build the bootstrap kubeconfig file.

Then create a TLS bootstrap secret on master01 so that kubelet certificates can be signed automatically:

[root@master01 ~]# TOKEN_PUB=$(openssl rand -hex 3)
[root@master01 ~]# TOKEN_SECRET=$(openssl rand -hex 8)
[root@master01 ~]# BOOTSTRAP_TOKEN="${TOKEN_PUB}.${TOKEN_SECRET}"
kubectl -n kube-system create secret generic bootstrap-token-${TOKEN_PUB} \
        --type 'bootstrap.kubernetes.io/token' \
        --from-literal description="cluster bootstrap token" \
        --from-literal token-id=${TOKEN_PUB} \
        --from-literal token-secret=${TOKEN_SECRET} \
        --from-literal usage-bootstrap-authentication=true \
        --from-literal usage-bootstrap-signing=true
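
The token and its secret can be confirmed before building the kubeconfig (the secret name is derived from the token-id generated above):
echo ${BOOTSTRAP_TOKEN}
kubectl -n kube-system get secret bootstrap-token-${TOKEN_PUB}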

Create the bootstrap kubeconfig

Variables
CLUSTER_NAME="kubernetes"
KUBE_USER="kubelet-bootstrap"
KUBE_CONFIG="bootstrap.kubeconfig"
Set the cluster parameters
kubectl config set-cluster ${CLUSTER_NAME} \
  --certificate-authority=/etc/kubernetes/pki/ca.crt \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=/etc/kubernetes/${KUBE_CONFIG}
Set the client authentication parameters
kubectl config set-credentials ${KUBE_USER} \
  --token=${BOOTSTRAP_TOKEN} \
  --kubeconfig=/etc/kubernetes/${KUBE_CONFIG}
Set the context parameters
kubectl config set-context ${KUBE_USER}@${CLUSTER_NAME} \
  --cluster=${CLUSTER_NAME} \
  --user=${KUBE_USER} \
  --kubeconfig=/etc/kubernetes/${KUBE_CONFIG}
Set the current context
[root@master01 ~]# kubectl config use-context ${KUBE_USER}@${CLUSTER_NAME} --kubeconfig=/etc/kubernetes/${KUBE_CONFIG}
Switched to context "kubelet-bootstrap@kubernetes".

View the generated config file
[root@master01 ~]# kubectl config view --kubeconfig=/etc/kubernetes/${KUBE_CONFIG}
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: DATA+OMITTED
    server: https://192.168.48.66:8443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubelet-bootstrap
  name: kubelet-bootstrap@kubernetes
current-context: kubelet-bootstrap@kubernetes
kind: Config
preferences: {}
users:
- name: kubelet-bootstrap
  user:
    token: c67c25.6ad3cb8482163569

Authorize kubelet to create CSRs

kubectl create clusterrolebinding kubeadm:kubelet-bootstrap \
        --clusterrole system:node-bootstrapper --group system:bootstrappers

Auto-approve all CSRs from the system:bootstrappers group

cat <<EOF | kubectl apply -f -
# Approve all CSRs for the group "system:bootstrappers"
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: auto-approve-csrs-for-group
subjects:
- kind: Group
  name: system:bootstrappers
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
  apiGroup: rbac.authorization.k8s.io
EOF

Allow kubelets to renew their own certificates

cat <<EOF | kubectl apply -f -
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: auto-approve-renewals-for-nodes
subjects:
- kind: Group
  name: system:nodes
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
  apiGroup: rbac.authorization.k8s.io
EOF

Copy the required files to all remaining nodes

for NODE in "${!Other[@]}"; do
    echo "--- $NODE ${Other[$NODE]} ---"
    ssh ${Other[$NODE]} "mkdir -p /etc/kubernetes/pki /etc/kubernetes/manifests /var/lib/kubelet/"
    for FILE in /etc/kubernetes/pki/ca.crt /etc/kubernetes/bootstrap.kubeconfig; do
      scp ${FILE} ${Other[$NODE]}:${FILE}
    done
done

node

Download the node binary package for the worker nodes

Link: https://pan.baidu.com/s/1CM71rUIEZv6OqYezXS7-kQ  extraction code: v479

wget https://dl.k8s.io/v1.15.2/kubernetes-node-linux-amd64.tar.gz
tar xvf kubernetes-node-linux-amd64.tar.gz
mv kubernetes/node/bin/kube{let,-proxy}  /usr/local/bin/

Distribute the node binaries

for name in ${!NodeArray[@]};do 
      echo "--- $name ${NodeArray[$name]} ---"
    scp /usr/local/bin/kube{let,-proxy} ${NodeArray[$name]}:/usr/local/bin/ 
done

kubelet

All of the following operations are performed on master01

Configure kubelet.service

vim /etc/kubernetes/kubelet-conf.yml

address: 0.0.0.0
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.crt
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
cgroupDriver: cgroupfs
cgroupsPerQOS: true
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
configMapAndSecretChangeDetectionStrategy: Watch
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
contentType: application/vnd.kubernetes.protobuf
cpuCFSQuota: true
cpuCFSQuotaPeriod: 100ms
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
enableControllerAttachDetach: true
enableDebuggingHandlers: true
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 20s
hairpinMode: promiscuous-bridge
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 20s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
iptablesDropBit: 15
iptablesMasqueradeBit: 14
kind: KubeletConfiguration
kubeAPIBurst: 10
kubeAPIQPS: 5
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeLeaseDurationSeconds: 40
nodeStatusReportFrequency: 1m0s
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
port: 10250
registryBurst: 10
registryPullQPS: 5
resolvConf: /etc/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
volumeStatsAggPeriod: 1m0s

vim /lib/systemd/system/kubelet.service

[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service

[Service]
ExecStart=/usr/local/bin/kubelet \
  --bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \
  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
  --config=/etc/kubernetes/kubelet-conf.yml \
  --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1 \
  --network-plugin=cni \
  --cni-conf-dir=/etc/cni/net.d \
  --cni-bin-dir=/opt/cni/bin \
  --cert-dir=/etc/kubernetes/pki \
  --cgroup-driver=cgroupfs \
  --v=2

Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target                                 

Distribute kubelet.service and its configuration file to every node

for NODE in "${!AllNode[@]}"; do
    echo "--- $NODE ${AllNode[$NODE]} ---"
    scp /lib/systemd/system/kubelet.service ${AllNode[$NODE]}:/lib/systemd/system/kubelet.service
    scp /etc/kubernetes/kubelet-conf.yml ${AllNode[$NODE]}:/etc/kubernetes/kubelet-conf.yml
done
for NODE in "${!AllNode[@]}"; do
    echo "--- $NODE ${AllNode[$NODE]} ---"
    ssh ${AllNode[$NODE]} "sed -ri '/0.0.0.0/s#\S+\$#${AllNode[$NODE]}#' /etc/kubernetes/kubelet-conf.yml"
    ssh ${AllNode[$NODE]} "sed -ri '/127.0.0.1/s#\S+\$#${AllNode[$NODE]}#' /etc/kubernetes/kubelet-conf.yml"
done

Start the kubelet service on every node

for NODE in "${!AllNode[@]}"; do
    echo "--- $NODE ${AllNode[$NODE]} ---"
    ssh ${AllNode[$NODE]} 'systemctl enable --now kubelet.service'
done

Verify the cluster

[root@master01 ~]# kubectl get nodes
NAME       STATUS     ROLES    AGE   VERSION
master01   NotReady   <none>   13m   v1.15.2
master02   NotReady   <none>   82s   v1.15.2
master03   NotReady   <none>   92s   v1.15.2
node01     NotReady   <none>   91s   v1.15.2
node02     NotReady   <none>   82s   v1.15.2
[root@master01 ~]# kubectl get csr
NAME        AGE   REQUESTOR                 CONDITION
csr-b9n6l   26m   system:bootstrap:c67c25   Approved,Issued
csr-f5q7b   26m   system:bootstrap:c67c25   Approved,Issued
csr-kqzjn   26m   system:bootstrap:c67c25   Approved,Issued
csr-rq4m6   26m   system:bootstrap:c67c25   Approved,Issued
csr-vz6qk   25m   system:bootstrap:c67c25   Approved,Issued

Label and taint the nodes to declare their roles

[root@master01 ~]# kubectl taint nodes ${!MasterArray[@]} node-role.kubernetes.io/master="":NoSchedule
[root@master01 ~]# kubectl label node ${!MasterArray[@]} node-role.kubernetes.io/master=""
node/master02 labeled
node/master03 labeled
node/master01 labeled
[root@master01 ~]# kubectl label node ${!NodeArray[@]} node-role.kubernetes.io/node=""
node/node01 labeled
node/node02 labeled
[root@master01 ~]# kubectl get nodes
NAME       STATUS     ROLES    AGE   VERSION
master01   NotReady   master   33m   v1.15.2
master02   NotReady   master   21m   v1.15.2
master03   NotReady   master   21m   v1.15.2
node01     NotReady   node     21m   v1.15.2
node02     NotReady   node     21m   v1.15.2

kube-proxy

All of the following operations are performed on master01

Configure kube-proxy

Create a service account for kube-proxy
[root@master01 ~]# kubectl -n kube-system create serviceaccount kube-proxy
serviceaccount/kube-proxy created
Bind the kube-proxy service account to the system:node-proxier clusterrole so RBAC allows it to proxy
kubectl create clusterrolebinding kubeadm:kube-proxy \
        --clusterrole system:node-proxier \
        --serviceaccount kube-system:kube-proxy

Create the kube-proxy kubeconfig

Variables
CLUSTER_NAME="kubernetes"
KUBE_CONFIG="kube-proxy.kubeconfig"

SECRET=$(kubectl -n kube-system get sa/kube-proxy \
    --output=jsonpath='{.secrets[0].name}')

JWT_TOKEN=$(kubectl -n kube-system get secret/$SECRET \
    --output=jsonpath='{.data.token}' | base64 -d)
Set the cluster parameters
kubectl config set-cluster ${CLUSTER_NAME} \
  --certificate-authority=/etc/kubernetes/pki/ca.crt \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=/etc/kubernetes/${KUBE_CONFIG}
Set the client authentication parameters
kubectl config set-credentials ${CLUSTER_NAME} \
  --token=${JWT_TOKEN} \
  --kubeconfig=/etc/kubernetes/${KUBE_CONFIG}
Set the context parameters
kubectl config set-context ${CLUSTER_NAME} \
  --cluster=${CLUSTER_NAME} \
  --user=${CLUSTER_NAME} \
  --kubeconfig=/etc/kubernetes/${KUBE_CONFIG}
Set the current context
kubectl config use-context ${CLUSTER_NAME} --kubeconfig=/etc/kubernetes/${KUBE_CONFIG}
View the generated config file
[root@master01 ~]# kubectl config view --kubeconfig=/etc/kubernetes/${KUBE_CONFIG}
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: DATA+OMITTED
    server: https://192.168.48.66:8443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubernetes
  name: kubernetes
current-context: kubernetes
kind: Config
preferences: {}
users:
- name: kubernetes
  user:
    token: eyJhbGciOiJFUzUxMiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlLXByb3h5LXRva2VuLXp4YzY5Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6Imt1YmUtcHJveHkiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiIzNzJlYzZhNC00YWMxLTRkNzktYWZkZi1lYzY4N2ZmMmViZTkiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06a3ViZS1wcm94eSJ9.AWCZMSIO2JqfAHNT-ke4U5IOvL0lH8iKNIVtYkHAHJsR3ekbR7mLdKaoyIvpamukTTM9F7_rxH1AESLnChNzr68QABR3pTD4Ostb-d9I9RcgPX-nt5uj2AHmu737F48UJSRl51lJA9GQRN2Bn9vl7KXcXDPelAlD59a9yhQO4dmBO5T0

Configure kube-proxy.service

vim /etc/kubernetes/kube-proxy.conf

apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
clientConnection:
    acceptContentTypes: ""
    burst: 10
    contentType: application/vnd.kubernetes.protobuf
    kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
    qps: 5
clusterCIDR: "10.244.0.0/16"
configSyncPeriod: 15m0s
conntrack:
    max: null
    maxPerCore: 32768
    min: 131072
    tcpCloseWaitTimeout: 1h0m0s
    tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: ""
iptables:
    masqueradeAll: true
    masqueradeBit: 14
    minSyncPeriod: 0s
    syncPeriod: 30s
ipvs:
    excludeCIDRs: null
    minSyncPeriod: 0s
    scheduler: ""
    syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
resourceContainer: /kube-proxy
udpIdleTimeout: 250ms

vim /usr/lib/systemd/system/kube-proxy.service

[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-proxy \
  --config=/etc/kubernetes/kube-proxy.conf \
  --v=2

Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target

Distribute the kube-proxy files to all nodes

for NODE in "${!Other[@]}"; do
    echo "--- $NODE ${Other[$NODE]} ---"
    scp /etc/kubernetes/kube-proxy.kubeconfig ${Other[$NODE]}:/etc/kubernetes/kube-proxy.kubeconfig
done
for NODE in "${!AllNode[@]}"; do
    echo "--- $NODE ${AllNode[$NODE]} ---"
    scp /etc/kubernetes/kube-proxy.conf ${AllNode[$NODE]}:/etc/kubernetes/kube-proxy.conf
    scp /usr/lib/systemd/system/kube-proxy.service ${AllNode[$NODE]}:/usr/lib/systemd/system/kube-proxy.service
done
for NODE in "${!AllNode[@]}"; do
    echo "--- $NODE ${AllNode[$NODE]} ---"
    ssh ${AllNode[$NODE]} "sed -ri '/0.0.0.0/s#\S+\$#${AllNode[$NODE]}#' /etc/kubernetes/kube-proxy.conf"
done

Start the kube-proxy service on all nodes:

for NODE in "${!AllNode[@]}"; do
    echo "--- $NODE ${AllNode[$NODE]} ---"
    ssh ${AllNode[$NODE]} 'systemctl enable --now kube-proxy'
done
[root@master01 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.96.0.1:443 rr
  -> 192.168.48.101:6443          Masq    1      0          0         
  -> 192.168.48.102:6443          Masq    1      0          0         
  -> 192.168.48.103:6443          Masq    1      0          0   

Calico

Download the YAML manifest

wget https://docs.projectcalico.org/v3.8/manifests/calico.yaml

Download link
Link: https://pan.baidu.com/s/171klIl79QWKN2n4VQokOwQ  extraction code: 52r9

Image downloads

Link: https://pan.baidu.com/s/1d_avGTEde-RfzmWkh2RyNQ  extraction code: cs9b

calico/cni:v3.8.1
calico/pod2daemon-flexvol:v3.8.1
calico/node:v3.8.1
calico/kube-controllers:v3.8.1

Change the pod network CIDR

[root@master01 ~]# sed -i -e "s?192.168.0.0/16?10.244.0.0/16?g" calico.yaml
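
A quick grep confirms the replacement took effect; CALICO_IPV4POOL_CIDR is the manifest variable that carries the pod CIDR, and its value should now read 10.244.0.0/16:
grep -A1 CALICO_IPV4POOL_CIDR calico.yaml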

Apply calico.yaml

kubectl apply -f calico.yaml
[root@master01 ~]# kubectl get pod --all-namespaces  -o wide -w
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE     IP               NODE       NOMINATED NODE   READINESS GATES
kube-system   calico-kube-controllers-7bd78b474d-r2tdd   1/1     Running   0          3m41s   10.244.235.1     master03   <none>           <none>
kube-system   calico-node-cfckb                          1/1     Running   0          3m40s   192.168.48.103   master03   <none>           <none>
kube-system   calico-node-kzqg6                          1/1     Running   0          3m40s   192.168.48.102   master02   <none>           <none>
kube-system   calico-node-msmv2                          1/1     Running   0          3m40s   192.168.48.202   node02     <none>           <none>
kube-system   calico-node-p2kvc                          1/1     Running   0          3m40s   192.168.48.201   node01     <none>           <none>
kube-system   calico-node-q8tr4                          1/1     Running   0          3m40s   192.168.48.101   master01   <none>           <none>

[root@master01 ~]# kubectl get nodes
NAME       STATUS   ROLES    AGE    VERSION
master01   Ready    master   121m   v1.15.2
master02   Ready    master   108m   v1.15.2
master03   Ready    master   109m   v1.15.2
node01     Ready    node     109m   v1.15.2
node02     Ready    node     108m   v1.15.2

coredns

The coredns.yaml file

vim coredns.yaml

apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
      kubernetes.io/cluster-service: "true"
      addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
      addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            upstream
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  replicas: 2
  # replicas: not specified here:
  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        beta.kubernetes.io/os: linux
      containers:
      - name: coredns
        image: coredns/coredns:1.4.0
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: host-time
          mountPath: /etc/localtime
          readOnly: true
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
        - name: host-time
          hostPath:
            path: /etc/localtime
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.96.0.10
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP

Apply coredns.yaml

[root@master01 ~]# kubectl apply  -f coredns.yaml 
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
configmap/coredns created
deployment.apps/coredns created
service/kube-dns created

Verify

First create a test pod

cat<<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - name: busybox
    image: busybox:1.28
    command:
      - sleep
      - "3600"
    imagePullPolicy: IfNotPresent
  restartPolicy: Always
EOF
[root@master01 ~]# kubectl exec -ti busybox -- nslookup kubernetes
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

Name:      kubernetes
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local

[root@master01 ~]# kubectl exec -ti busybox -- ping www.baidu.com
PING www.baidu.com (182.61.200.7): 56 data bytes
64 bytes from 182.61.200.7: seq=0 ttl=127 time=5.203 ms
64 bytes from 182.61.200.7: seq=1 ttl=127 time=3.559 ms
64 bytes from 182.61.200.7: seq=2 ttl=127 time=3.852 ms
64 bytes from 182.61.200.7: seq=3 ttl=127 time=3.331 ms