k8s Environment Deployment
1. Host Planning
k8s-master node: 192.168.200.141
k8s-node1 node: 192.168.200.142
k8s-node2 node: 192.168.200.143
2. Environment Setup
2.1 Configure passwordless SSH trust among the three machines
Run the following on the master node (the transcript below is from k8s-node1, where the same steps were repeated):
ssh-keygen -t rsa
ssh-copy-id -i .ssh/id_rsa.pub root@192.168.200.142
ssh-copy-id -i .ssh/id_rsa.pub root@192.168.200.143
#1. Generate the key pair
[root@k8s-node1 ~]# ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
Created directory '/root/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:D4EZU2i0oe8rmAWkG6faKxaXeDdig3QAs25BoyUziCo root@k8s-node1
The key's randomart image is:
+---[RSA 2048]----+
|Xo. .=o. |
|=Oo .oB |
|++. ..+ . |
|E.+. . . |
|oB+.. . S |
|++ B.+ o |
|..=++ o . |
|o.+ . . |
|.... .. |
+----[SHA256]-----+
#2. Copy the public key to the node hosts
[root@k8s-node1 ~]# ssh-copy-id -i .ssh/id_rsa.pub root@192.168.200.142
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: ".ssh/id_rsa.pub"
The authenticity of host '192.168.200.142 (192.168.200.142)' can't be established.
ECDSA key fingerprint is SHA256:aGpSOVNCbI3Ohc+kugDYkfCmHqWsNnJjEtMgUwMJS/c.
ECDSA key fingerprint is MD5:5a:52:02:be:72:f9:3f:0b:8d:b8:4c:61:ee:fb:23:94.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@192.168.200.142's password:
Number of key(s) added: 1
Now try logging into the machine, with: "ssh 'root@192.168.200.142'"
and check to make sure that only the key(s) you wanted were added.
Run the same two commands on the other two nodes as well, so that every machine trusts the others.
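If you prefer to script the distribution, a minimal sketch (assuming the key pair already exists on the current node; the IP list is this cluster's, adjust it to your own hosts):
for ip in 192.168.200.141 192.168.200.142 192.168.200.143; do
  ssh-copy-id -i ~/.ssh/id_rsa.pub root@$ip   # prompts once per host for the root password
done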
2.2 Disable firewalld, SELinux, dnsmasq, and swap on all nodes
systemctl stop firewalld
systemctl disable --now firewalld
systemctl disable --now dnsmasq
setenforce 0
sed -ri "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/sysconfig/selinux
[root@k8s-master ~]# systemctl stop firewalld
[root@k8s-master ~]# systemctl disable --now firewalld
[root@k8s-master ~]# systemctl disable --now dnsmasq
[root@k8s-master ~]# setenforce 0
[root@k8s-master ~]# sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/sysconfig/selinux
[root@k8s-master ~]# getenforce
Permissive
2.3 Disable swap
swapoff -a && sysctl -w vm.swappiness=0
sed -ri "/swap/s/^.*$/#&/g" /etc/fstab
[root@k8s-master ~]# swapoff -a
[root@k8s-master ~]# sed -ri 's/.*swap.*/#&/' /etc/fstab
[root@k8s-master ~]# echo "vm.swappiness=0" >> /etc/sysctl.conf
[root@k8s-master ~]# sysctl -p
vm.swappiness = 0
[root@k8s-master ~]#
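A quick check (not part of the original transcript) that swap is fully off:
free -h       # the Swap line should read 0B
swapon -s     # no entries means no active swap devices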
2.4 Time synchronization
yum -y install ntpdate
crontab -e
0 */1 * * * ntpdate time1.aliyun.com
[root@k8s-master ~]# yum -y install ntpdate
Loaded plugins: fastestmirror, langpacks
Determining fastest mirrors
* base: mirrors.aliyun.com
* extras: mirrors.nju.edu.cn
* updates: mirrors.nju.edu.cn
base | 3.6 kB 00:00:00
extras | 2.9 kB 00:00:00
updates | 2.9 kB 00:00:00
updates/7/x86_64/primary_db | 23 MB 00:00:38
Package ntpdate-4.2.6p5-29.el7.centos.2.x86_64 already installed and latest version
Nothing to do
[root@k8s-master ~]# crontab -e
no crontab for root - using an empty one
crontab: installing new crontab
[root@k8s-master ~]# crontab -l
0 */1 * * * ntpdate time1.aliyun.com
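To confirm the cron job will work, trigger one synchronization by hand:
ntpdate time1.aliyun.com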
2.5 Host system tuning
Raise the resource limits for the current session:
ulimit -SHn 65535
cat <<EOF >> /etc/security/limits.conf
* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF
[root@k8s-master ~]# ulimit -SHn 65535
[root@k8s-master ~]# cat <<EOF >> /etc/security/limits.conf
> * soft nofile 655360
> * hard nofile 655360
> * soft nproc 655350
> * hard nproc 655350
> * soft memlock unlimited
> * hard memlock unlimited
> EOF
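After logging in again, the new limits can be verified (the values reflect the limits.conf entries above):
ulimit -n    # max open files (nofile)
ulimit -u    # max user processes (nproc)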
2.6 Install IPVS management tools and load kernel modules
yum -y install ipvsadm ipset sysstat conntrack libseccomp
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
Tip: on kernels 4.19 and later, nf_conntrack_ipv4 has been renamed to nf_conntrack; on 4.18 and earlier, use nf_conntrack_ipv4.
cat >/etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF
systemctl enable --now systemd-modules-load.service
lsmod |grep -e ip_vs -e nf_conntrack
To also load the overlay and br_netfilter modules persistently:
cat > /etc/modules-load.d/containerd.conf << EOF
overlay
br_netfilter
EOF
[root@k8s-master ~]# vim /etc/modules-load.d/ipvs.conf
[root@k8s-master ~]# systemctl enable --now systemd-modules-load.service
[root@k8s-master ~]# lsmod |grep -e ip_vs -e nf_conntrack
nf_conntrack_ipv4 15053 0
nf_defrag_ipv4 12729 1 nf_conntrack_ipv4
nf_conntrack 139264 1 nf_conntrack_ipv4
libcrc32c 12644 2 xfs,nf_conntrack
[root@k8s-master ~]# cat > /etc/modules-load.d/containerd.conf << EOF
> overlay
> br_netfilter
> EOF
2.7 Kernel tuning
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.ip_conntrack_max = 131072
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
sysctl --system
reboot
[root@k8s-master ~]# cat <<EOF > /etc/sysctl.d/k8s.conf
> net.ipv4.ip_forward = 1
> net.bridge.bridge-nf-call-iptables = 1
> net.bridge.bridge-nf-call-ip6tables = 1
> fs.may_detach_mounts = 1
> vm.overcommit_memory=1
> vm.panic_on_oom=0
> fs.inotify.max_user_watches=89100
> fs.file-max=52706963
> fs.nr_open=52706963
> net.netfilter.nf_conntrack_max=2310720
>
> net.ipv4.tcp_keepalive_time = 600
> net.ipv4.tcp_keepalive_probes = 3
> net.ipv4.tcp_keepalive_intvl =15
> net.ipv4.tcp_max_tw_buckets = 36000
> net.ipv4.tcp_tw_reuse = 1
> net.ipv4.tcp_max_orphans = 327680
> net.ipv4.tcp_orphan_retries = 3
> net.ipv4.tcp_syncookies = 1
> net.ipv4.tcp_max_syn_backlog = 16384
> net.ipv4.ip_conntrack_max = 131072
> net.ipv4.tcp_max_syn_backlog = 16384
> net.ipv4.tcp_timestamps = 0
> net.core.somaxconn = 16384
> EOF
[root@k8s-master ~]# sysctl --system
* Applying /usr/lib/sysctl.d/00-system.conf ...
* Applying /usr/lib/sysctl.d/10-default-yama-scope.conf ...
kernel.yama.ptrace_scope = 0
* Applying /usr/lib/sysctl.d/50-default.conf ...
kernel.sysrq = 16
kernel.core_uses_pid = 1
kernel.kptr_restrict = 1
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.conf.all.accept_source_route = 0
net.ipv4.conf.default.promote_secondaries = 1
net.ipv4.conf.all.promote_secondaries = 1
fs.protected_hardlinks = 1
fs.protected_symlinks = 1
* Applying /usr/lib/sysctl.d/60-libvirtd.conf ...
fs.aio-max-nr = 1048576
* Applying /etc/sysctl.d/99-sysctl.conf ...
vm.swappiness = 0
* Applying /etc/sysctl.d/k8s.conf ...
net.ipv4.ip_forward = 1
fs.may_detach_mounts = 1
vm.overcommit_memory = 1
vm.panic_on_oom = 0
fs.inotify.max_user_watches = 89100
fs.file-max = 52706963
fs.nr_open = 52706963
net.netfilter.nf_conntrack_max = 2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
* Applying /etc/sysctl.conf ...
vm.swappiness = 0
[root@k8s-master ~]# reboot
2.8 Change the Docker cgroup driver
Set the Docker cgroup driver to systemd and restart Docker:
cat >/etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  }
}
EOF
systemctl daemon-reload
systemctl restart docker
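A quick check that Docker picked up the new driver (output wording varies slightly by Docker version):
docker info | grep -i cgroup    # expect: Cgroup Driver: systemd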
3. Deploying with kubeadm
3.1 Configure the Kubernetes yum repository
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
3.2 Install the Kubernetes components
yum install -y kubelet-1.15.0 kubeadm-1.15.0 kubectl-1.15.0
systemctl enable kubelet
[root@k8s-master ~]# yum install -y kubelet-1.15.0 kubeadm-1.15.0 kubectl-1.15.0
Loaded plugins: fastestmirror, langpacks
Loading mirror speeds from cached hostfile
* base: mirrors.aliyun.com
* extras: mirrors.ustc.edu.cn
* updates: mirrors.ustc.edu.cn
Resolving Dependencies
--> Running transaction check
---> Package kubeadm.x86_64 0:1.15.0-0 will be installed
--> Processing Dependency: kubernetes-cni >= 0.7.5 for package: kubeadm-1.15.0-0.x86_64
--> Processing Dependency: cri-tools >= 1.11.0 for package: kubeadm-1.15.0-0.x86_64
---> Package kubectl.x86_64 0:1.15.0-0 will be installed
---> Package kubelet.x86_64 0:1.15.0-0 will be installed
--> Processing Dependency: socat for package: kubelet-1.15.0-0.x86_64
--> Running transaction check
---> Package cri-tools.x86_64 0:1.26.0-0 will be installed
---> Package kubernetes-cni.x86_64 0:1.2.0-0 will be installed
---> Package socat.x86_64 0:1.7.3.2-2.el7 will be installed
--> Finished Dependency Resolution
Dependencies Resolved
=======================================================================================================================================================================================================================================
Package Arch Version Repository Size
=======================================================================================================================================================================================================================================
Installing:
kubeadm x86_64 1.15.0-0 kubernetes 8.9 M
kubectl x86_64 1.15.0-0 kubernetes 9.5 M
kubelet x86_64 1.15.0-0 kubernetes 22 M
Installing for dependencies:
cri-tools x86_64 1.26.0-0 kubernetes 8.6 M
kubernetes-cni x86_64 1.2.0-0 kubernetes 17 M
socat x86_64 1.7.3.2-2.el7 base 290 k
Transaction Summary
=======================================================================================================================================================================================================================================
Install 3 Packages (+3 Dependent packages)
Total download size: 66 M
Installed size: 283 M
Downloading packages:
warning: /var/cache/yum/x86_64/7/kubernetes/packages/3f5ba2b53701ac9102ea7c7ab2ca6616a8cd5966591a77577585fde1c434ef74-cri-tools-1.26.0-0.x86_64.rpm: Header V4 RSA/SHA512 Signature, key ID 3e1ba8d5: NOKEY
Public key for 3f5ba2b53701ac9102ea7c7ab2ca6616a8cd5966591a77577585fde1c434ef74-cri-tools-1.26.0-0.x86_64.rpm is not installed
(1/6): 3f5ba2b53701ac9102ea7c7ab2ca6616a8cd5966591a77577585fde1c434ef74-cri-tools-1.26.0-0.x86_64.rpm | 8.6 MB 00:00:10
(2/6): 3d5dd3e6a783afcd660f9954dec3999efa7e498cac2c14d63725fafa1b264f14-kubectl-1.15.0-0.x86_64.rpm | 9.5 MB 00:00:11
(3/6): 7143f62ad72a1eb1849d5c1e9490567d405870d2c00ab2b577f1f3bdf9f547ba-kubeadm-1.15.0-0.x86_64.rpm | 8.9 MB 00:00:23
(4/6): socat-1.7.3.2-2.el7.x86_64.rpm | 290 kB 00:00:03
(5/6): 557c2f4e11a3ab262c72a52d240f2f440c63f539911ff5e05237904893fc36bb-kubelet-1.15.0-0.x86_64.rpm | 22 MB 00:00:26
(6/6): 0f2a2afd740d476ad77c508847bad1f559afc2425816c1f2ce4432a62dfe0b9d-kubernetes-cni-1.2.0-0.x86_64.rpm | 17 MB 00:00:32
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Total 1.2 MB/s | 66 MB 00:00:55
Retrieving key from https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
Importing GPG key 0x13EDEF05:
Userid : "Rapture Automatic Signing Key (cloud-rapture-signing-key-2022-03-07-08_01_01.pub)"
Fingerprint: a362 b822 f6de dc65 2817 ea46 b53d c80d 13ed ef05
From : https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
Retrieving key from https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
Importing GPG key 0x3E1BA8D5:
Userid : "Google Cloud Packages RPM Signing Key <gc-team@google.com>"
Fingerprint: 3749 e1ba 95a8 6ce0 5454 6ed2 f09c 394c 3e1b a8d5
From : https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
Installing : cri-tools-1.26.0-0.x86_64 1/6
Installing : socat-1.7.3.2-2.el7.x86_64 2/6
Installing : kubernetes-cni-1.2.0-0.x86_64 3/6
Installing : kubelet-1.15.0-0.x86_64 4/6
Installing : kubectl-1.15.0-0.x86_64 5/6
Installing : kubeadm-1.15.0-0.x86_64 6/6
Verifying : kubeadm-1.15.0-0.x86_64 1/6
Verifying : kubelet-1.15.0-0.x86_64 2/6
Verifying : kubernetes-cni-1.2.0-0.x86_64 3/6
Verifying : kubectl-1.15.0-0.x86_64 4/6
Verifying : socat-1.7.3.2-2.el7.x86_64 5/6
Verifying : cri-tools-1.26.0-0.x86_64 6/6
Installed:
kubeadm.x86_64 0:1.15.0-0 kubectl.x86_64 0:1.15.0-0 kubelet.x86_64 0:1.15.0-0
Dependency Installed:
cri-tools.x86_64 0:1.26.0-0 kubernetes-cni.x86_64 0:1.2.0-0 socat.x86_64 0:1.7.3.2-2.el7
Complete!
[root@k8s-master ~]# systemctl enable kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
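A quick sanity check that the expected 1.15.0 binaries were installed:
kubeadm version -o short            # expect v1.15.0
kubectl version --client --short    # expect Client Version: v1.15.0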
3.3 Install the Kubernetes cluster
The steps below follow the CSDN blog post "kubeadm部署k8s集群最全最详细" and were verified successfully in this environment.
#!/bin/sh
### Version info
K8S_VERSION=v1.15.1
ETCD_VERSION=3.3.10
#DASHBOARD_VERSION=v1.8.3
FLANNEL_VERSION=v0.12.0-amd64
#DNS_VERSION=1.14.8
PAUSE_VERSION=3.1
coredns_version=1.3.1
## Core components
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver-amd64:$K8S_VERSION
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager-amd64:$K8S_VERSION
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler-amd64:$K8S_VERSION
docker pull aiotceo/kube-proxy:$K8S_VERSION
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd-amd64:$ETCD_VERSION
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:$PAUSE_VERSION
### Networking
#docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/k8s-dns-sidecar-amd64:$DNS_VERSION
#docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/k8s-dns-kube-dns-amd64:$DNS_VERSION
#docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/k8s-dns-dnsmasq-nanny-amd64:$DNS_VERSION
docker pull quay.io/coreos/flannel:$FLANNEL_VERSION
docker pull registry.cn-hangzhou.aliyuncs.com/openthings/k8s-gcr-io-coredns:$coredns_version
### Dashboard (frontend)
#docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kubernetes-dashboard-amd64:$DASHBOARD_VERSION
## Retag the images to the k8s.gcr.io names kubeadm expects
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver-amd64:$K8S_VERSION k8s.gcr.io/kube-apiserver-amd64:$K8S_VERSION
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager-amd64:$K8S_VERSION k8s.gcr.io/kube-controller-manager-amd64:$K8S_VERSION
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler-amd64:$K8S_VERSION k8s.gcr.io/kube-scheduler-amd64:$K8S_VERSION
#docker tag registry.cn-hangzhou.aliyuncs.com/openthings/k8s-gcr-io-kube-proxy-amd64:$K8S_VERSION k8s.gcr.io/kube-proxy-amd64:$K8S_VERSION
docker tag aiotceo/kube-proxy:$K8S_VERSION k8s.gcr.io/kube-proxy-amd64:$K8S_VERSION
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd-amd64:$ETCD_VERSION k8s.gcr.io/etcd-amd64:$ETCD_VERSION
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:$PAUSE_VERSION k8s.gcr.io/pause:$PAUSE_VERSION
#docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/k8s-dns-sidecar-amd64:$DNS_VERSION k8s.gcr.io/k8s-dns-sidecar-amd64:$DNS_VERSION
#docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/k8s-dns-kube-dns-amd64:$DNS_VERSION k8s.gcr.io/k8s-dns-kube-dns-amd64:$DNS_VERSION
#docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/k8s-dns-dnsmasq-nanny-amd64:$DNS_VERSION k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:$DNS_VERSION
#docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kubernetes-dashboard-amd64:$DASHBOARD_VERSION k8s.gcr.io/kubernetes-dashboard-amd64:$DASHBOARD_VERSION
docker tag registry.cn-hangzhou.aliyuncs.com/openthings/k8s-gcr-io-coredns:$coredns_version k8s.gcr.io/coredns:$coredns_version
## Remove the original tags
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver-amd64:$K8S_VERSION
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager-amd64:$K8S_VERSION
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler-amd64:$K8S_VERSION
#docker rmi registry.cn-hangzhou.aliyuncs.com/openthings/k8s-gcr-io-kube-proxy-amd64:$K8S_VERSION
docker rmi aiotceo/kube-proxy:$K8S_VERSION
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/etcd-amd64:$ETCD_VERSION
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:$PAUSE_VERSION
#docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/k8s-dns-sidecar-amd64:$DNS_VERSION
#docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/k8s-dns-kube-dns-amd64:$DNS_VERSION
#docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/k8s-dns-dnsmasq-nanny-amd64:$DNS_VERSION
#docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/kubernetes-dashboard-amd64:$DASHBOARD_VERSION
docker rmi registry.cn-hangzhou.aliyuncs.com/openthings/k8s-gcr-io-coredns:$coredns_version
Save the script as k8s_image.sh and run it on each of the three machines to download the images: ./k8s_image.sh
[root@k8s-master k8s]# ./k8s_image.sh
v1.15.1: Pulling from google_containers/kube-apiserver-amd64
39fafc05754f: Pull complete
5899bcec7bbf: Pull complete
Digest: sha256:b3caa91c93fb64568503c5ceef7789e3ffcf09cb7ee0efa47016c7da56100383
Status: Downloaded newer image for registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver-amd64:v1.15.1
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver-amd64:v1.15.1
v1.15.1: Pulling from google_containers/kube-controller-manager-amd64
39fafc05754f: Already exists
5c943020ad72: Pull complete
Digest: sha256:c7d36404e4a21911edb99f2cf3bde70915b44238793946e055efb5826822f973
Status: Downloaded newer image for registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager-amd64:v1.15.1
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager-amd64:v1.15.1
v1.15.1: Pulling from google_containers/kube-scheduler-amd64
39fafc05754f: Already exists
66ca8e0fb424: Pull complete
Digest: sha256:e170762aec896d0dc504a792c839ca72677a06e0633510462468f43cdf01412e
Status: Downloaded newer image for registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler-amd64:v1.15.1
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler-amd64:v1.15.1
v1.15.1: Pulling from aiotceo/kube-proxy
39fafc05754f: Already exists
db3f71d0eb90: Pull complete
3a8a38f10886: Pull complete
Digest: sha256:5e525d1b5cf33a721768697bde0f20baa3f6967c3a10860617a15ec7cbb586d7
Status: Downloaded newer image for aiotceo/kube-proxy:v1.15.1
docker.io/aiotceo/kube-proxy:v1.15.1
3.3.10: Pulling from google_containers/etcd-amd64
90e01955edcd: Pull complete
6369547c492e: Pull complete
bd2b173236d3: Pull complete
Digest: sha256:240bd81c2f54873804363665c5d1a9b8e06ec5c63cfc181e026ddec1d81585bb
Status: Downloaded newer image for registry.cn-hangzhou.aliyuncs.com/google_containers/etcd-amd64:3.3.10
registry.cn-hangzhou.aliyuncs.com/google_containers/etcd-amd64:3.3.10
3.1: Pulling from google_containers/pause-amd64
67ddbfb20a22: Pull complete
Digest: sha256:59eec8837a4d942cc19a52b8c09ea75121acc38114a2c68b98983ce9356b8610
Status: Downloaded newer image for registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1
registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1
v0.12.0-amd64: Pulling from coreos/flannel
921b31ab772b: Pull complete
4882ae1d65d3: Pull complete
ac6ef98d5d6d: Pull complete
8ba0f465eea4: Pull complete
fd2c2618e30c: Pull complete
Digest: sha256:6d451d92c921f14bfb38196aacb6e506d4593c5b3c9d40a8b8a2506010dc3e10
Status: Downloaded newer image for quay.io/coreos/flannel:v0.12.0-amd64
quay.io/coreos/flannel:v0.12.0-amd64
1.3.1: Pulling from openthings/k8s-gcr-io-coredns
e0daa8927b68: Pull complete
3928e47de029: Pull complete
Digest: sha256:638adb0319813f2479ba3642bbe37136db8cf363b48fb3eb7dc8db634d8d5a5b
Status: Downloaded newer image for registry.cn-hangzhou.aliyuncs.com/openthings/k8s-gcr-io-coredns:1.3.1
registry.cn-hangzhou.aliyuncs.com/openthings/k8s-gcr-io-coredns:1.3.1
Untagged: registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver-amd64:v1.15.1
Untagged: registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver-amd64@sha256:b3caa91c93fb64568503c5ceef7789e3ffcf09cb7ee0efa47016c7da56100383
Untagged: registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager-amd64:v1.15.1
Untagged: registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager-amd64@sha256:c7d36404e4a21911edb99f2cf3bde70915b44238793946e055efb5826822f973
Untagged: registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler-amd64:v1.15.1
Untagged: registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler-amd64@sha256:e170762aec896d0dc504a792c839ca72677a06e0633510462468f43cdf01412e
Untagged: aiotceo/kube-proxy:v1.15.1
Untagged: aiotceo/kube-proxy@sha256:5e525d1b5cf33a721768697bde0f20baa3f6967c3a10860617a15ec7cbb586d7
Untagged: registry.cn-hangzhou.aliyuncs.com/google_containers/etcd-amd64:3.3.10
Untagged: registry.cn-hangzhou.aliyuncs.com/google_containers/etcd-amd64@sha256:240bd81c2f54873804363665c5d1a9b8e06ec5c63cfc181e026ddec1d81585bb
Untagged: registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1
Untagged: registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64@sha256:59eec8837a4d942cc19a52b8c09ea75121acc38114a2c68b98983ce9356b8610
Untagged: registry.cn-hangzhou.aliyuncs.com/openthings/k8s-gcr-io-coredns:1.3.1
Untagged: registry.cn-hangzhou.aliyuncs.com/openthings/k8s-gcr-io-coredns@sha256:638adb0319813f2479ba3642bbe37136db8cf363b48fb3eb7dc8db634d8d5a5b
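Before initializing, it is worth confirming the retagged images are present, e.g.:
docker images | grep -E 'k8s.gcr.io|flannel'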
3.4 Edit the kubeadm-config.yaml file
[root@k8s-master flannel]# cat ../../../install-k8s/core/kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.200.141
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
kubernetesVersion: v1.15.1
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs
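With a config file like this, the master could alternatively be initialized from it rather than from command-line flags (the transcript in section 3.5 passes flags directly):
kubeadm init --config kubeadm-config.yaml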
3.5 Initialize the master node
A default configuration can be dumped for reference:
kubeadm config print init-defaults >kubeadm-config.yaml
[root@k8s-master k8s]# kubeadm init --image-repository=registry.aliyuncs.com/google_containers --pod-network-cidr=10.244.0.0/16 --kubernetes-version=v1.15.1
[init] Using Kubernetes version: v1.15.1
[preflight] Running pre-flight checks
[WARNING Service-Docker]: docker service is not enabled, please run 'systemctl enable docker.service'
[WARNING SystemVerification]: this Docker version is not on the list of validated versions: 24.0.6. Latest validated version: 18.09
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master localhost] and IPs [192.168.200.141 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master localhost] and IPs [192.168.200.141 127.0.0.1 ::1]
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.200.141]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 19.005992 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.15" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node k8s-master as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node k8s-master as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: 9ew53l.nvi619bsxp6lvi29
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.200.141:6443 --token 9ew53l.nvi619bsxp6lvi29 \
--discovery-token-ca-cert-hash sha256:b20d107274cd289f05391182baeaf5d54bfaad1fb09971569a8acd30bca08a41
Follow the prompts:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
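Verify that kubectl can now reach the cluster:
kubectl get nodes    # the master is listed, NotReady until a pod network is deployed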
3.6 Join the worker nodes
Run this command on each of the remaining nodes:
kubeadm join 192.168.200.141:6443 --token 9ew53l.nvi619bsxp6lvi29 \
--discovery-token-ca-cert-hash sha256:b20d107274cd289f05391182baeaf5d54bfaad1fb09971569a8acd30bca08a41
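The bootstrap token is valid for 24 hours; if it has expired by the time you join a node, generate a fresh join command on the master:
kubeadm token create --print-join-command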
3.7 Deploy the network plugin (flannel)
If flannel has not been deployed yet, kubectl get node will show the master in the NotReady state.
vim kube-flannel.yml
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
  - configMap
  - secret
  - emptyDir
  - hostPath
  allowedHostPaths:
  - pathPrefix: "/etc/cni/net.d"
  - pathPrefix: "/etc/kube-flannel"
  - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
rules:
- apiGroups: ['extensions']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-amd64
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
              - key: kubernetes.io/arch
                operator: In
                values:
                - amd64
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: registry.cn-zhangjiakou.aliyuncs.com/test-lab/coreos-flannel:amd64
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: registry.cn-zhangjiakou.aliyuncs.com/test-lab/coreos-flannel:amd64
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-arm64
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
              - key: kubernetes.io/arch
                operator: In
                values:
                - arm64
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: registry.cn-zhangjiakou.aliyuncs.com/test-lab/coreos-flannel:arm64
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: registry.cn-zhangjiakou.aliyuncs.com/test-lab/coreos-flannel:arm64
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-arm
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
              - key: kubernetes.io/arch
                operator: In
                values:
                - arm
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: registry.cn-zhangjiakou.aliyuncs.com/test-lab/coreos-flannel:arm
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: registry.cn-zhangjiakou.aliyuncs.com/test-lab/coreos-flannel:arm
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-ppc64le
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
              - key: kubernetes.io/arch
                operator: In
                values:
                - ppc64le
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: registry.cn-zhangjiakou.aliyuncs.com/test-lab/coreos-flannel:ppc64le
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: registry.cn-zhangjiakou.aliyuncs.com/test-lab/coreos-flannel:ppc64le
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-s390x
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
              - key: kubernetes.io/arch
                operator: In
                values:
                - s390x
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: registry.cn-zhangjiakou.aliyuncs.com/test-lab/coreos-flannel:s390x
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: registry.cn-zhangjiakou.aliyuncs.com/test-lab/coreos-flannel:s390x
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
[root@k8s-master flannel]# kubectl create -f kube-flannel.yml
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds-amd64 created
daemonset.apps/kube-flannel-ds-arm64 created
daemonset.apps/kube-flannel-ds-arm created
daemonset.apps/kube-flannel-ds-ppc64le created
daemonset.apps/kube-flannel-ds-s390x created
[root@k8s-master flannel]# kubectl get po -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-bccdc95cf-tvstk 0/1 Pending 0 6m40s
kube-system coredns-bccdc95cf-xwprl 0/1 Pending 0 132m
kube-system etcd-k8s-master 1/1 Running 0 131m
kube-system kube-apiserver-k8s-master 1/1 Running 0 131m
kube-system kube-controller-manager-k8s-master 1/1 Running 0 131m
kube-system kube-flannel-ds-amd64-99764 1/1 Running 0 7s
kube-system kube-flannel-ds-amd64-t7nhw 0/1 Pending 0 7s
kube-system kube-proxy-rj8bt 1/1 Running 0 9m49s
kube-system kube-proxy-xrftw 1/1 Running 0 132m
kube-system kube-scheduler-k8s-master 1/1 Running 0 131m
Run kubectl get node again to check whether the status has changed to Ready; in this experiment k8s-node1 was powered off at the time.
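Two useful checks after deploying flannel (the app=flannel label comes from the manifest above):
kubectl get nodes
kubectl get po -n kube-system -l app=flannel -o wide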
3.8 Troubleshooting: coredns stuck in ContainerCreating
Symptom: the coredns pods stay in the ContainerCreating state.
Inspect the pod for details: kubectl describe po coredns-bccdc95cf-f4cvr -n kube-system
[root@k8s-master ~]# kubectl describe po coredns-bccdc95cf-f4cvr -n kube-system
Name: coredns-bccdc95cf-f4cvr
Namespace: kube-system
Priority: 2000000000
Priority Class Name: system-cluster-critical
Node: k8s-node1/192.168.200.142
Start Time: Sun, 08 Oct 2023 08:59:39 +0800
Labels: k8s-app=kube-dns
pod-template-hash=bccdc95cf
Annotations: <none>
Status: Pending
IP:
Controlled By: ReplicaSet/coredns-bccdc95cf
Containers:
coredns:
Container ID:
Image: registry.aliyuncs.com/google_containers/coredns:1.3.1
Image ID:
Ports: 53/UDP, 53/TCP, 9153/TCP
Host Ports: 0/UDP, 0/TCP, 0/TCP
Args:
-conf
/etc/coredns/Corefile
State: Waiting
Reason: ContainerCreating
Ready: False
Restart Count: 0
Limits:
memory: 170Mi
Requests:
cpu: 100m
memory: 70Mi
Liveness: http-get http://:8080/health delay=60s timeout=5s period=10s #success=1 #failure=5
Readiness: http-get http://:8080/health delay=0s timeout=1s period=10s #success=1 #failure=3
Environment: <none>
Mounts:
/etc/coredns from config-volume (ro)
/var/run/secrets/kubernetes.io/serviceaccount from coredns-token-bt7qw (ro)
Conditions:
Type Status
Initialized True
Ready False
ContainersReady False
PodScheduled True
Volumes:
config-volume:
Type: ConfigMap (a volume populated by a ConfigMap)
Name: coredns
Optional: false
coredns-token-bt7qw:
Type: Secret (a volume populated by a Secret)
SecretName: coredns-token-bt7qw
Optional: false
QoS Class: Burstable
Node-Selectors: beta.kubernetes.io/os=linux
Tolerations: CriticalAddonsOnly
node-role.kubernetes.io/master:NoSchedule
node.kubernetes.io/not-ready:NoExecute for 300s
node.kubernetes.io/unreachable:NoExecute for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 165m default-scheduler Successfully assigned kube-system/coredns-bccdc95cf-f4cvr to k8s-node1
Warning FailedCreatePodSandBox 165m kubelet, k8s-node1 Failed create pod sandbox: rpc error: code = Unknown desc = [failed to set up sandbox container "bb3aff3b65af590e43e87a8b8c5ba2a97527f7bd39b6b00afb5a94c5ce4a8350" network for pod "coredns-bccdc95cf-f4cvr": NetworkPlugin cni failed to set up pod "coredns-bccdc95cf-f4cvr_kube-system" network: failed to find plugin "flannel" in path [/opt/cni/bin], failed to clean up sandbox container "bb3aff3b65af590e43e87a8b8c5ba2a97527f7bd39b6b00afb5a94c5ce4a8350" network for pod "coredns-bccdc95cf-f4cvr": NetworkPlugin cni failed to teardown pod "coredns-bccdc95cf-f4cvr_kube-system" network: failed to find plugin "flannel" in path [/opt/cni/bin]]
Normal SandboxChanged 140m (x116 over 165m) kubelet, k8s-node1 Pod sandbox changed, it will be killed and re-created.
The events show the flannel CNI plugin binary is missing from /opt/cni/bin; three commands fix it:
wget https://github.com/containernetworking/plugins/releases/download/v0.8.6/cni-plugins-linux-amd64-v0.8.6.tgz
tar zxvf cni-plugins-linux-amd64-v0.8.6.tgz
cp flannel /opt/cni/bin/
[root@k8s-master flannel]# wget https://github.com/containernetworking/plugins/releases/download/v0.8.6/cni-plugins-linux-amd64-v0.8.6.tgz
--2023-10-08 11:51:55-- https://github.com/containernetworking/plugins/releases/download/v0.8.6/cni-plugins-linux-amd64-v0.8.6.tgz
Resolving github.com (github.com)... 20.205.243.166
Connecting to github.com (github.com)|20.205.243.166|:443... connected.
HTTP request sent, awaiting response... 302 Found
Location: https://objects.githubusercontent.com/github-production-release-asset-2e65be/84575398/aa05da80-9564-11ea-8c3e-599dc43b8341?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20231008%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20231008T035053Z&X-Amz-Expires=300&X-Amz-Signature=1b8f10f244c4da02b149a387ab818711898e62f867d9f15af70347e6161327e2&X-Amz-SignedHeaders=host&actor_id=0&key_id=0&repo_id=84575398&response-content-disposition=attachment%3B%20filename%3Dcni-plugins-linux-amd64-v0.8.6.tgz&response-content-type=application%2Foctet-stream [following]
--2023-10-08 11:51:55-- https://objects.githubusercontent.com/github-production-release-asset-2e65be/84575398/aa05da80-9564-11ea-8c3e-599dc43b8341?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20231008%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20231008T035053Z&X-Amz-Expires=300&X-Amz-Signature=1b8f10f244c4da02b149a387ab818711898e62f867d9f15af70347e6161327e2&X-Amz-SignedHeaders=host&actor_id=0&key_id=0&repo_id=84575398&response-content-disposition=attachment%3B%20filename%3Dcni-plugins-linux-amd64-v0.8.6.tgz&response-content-type=application%2Foctet-stream
Resolving objects.githubusercontent.com (objects.githubusercontent.com)... 185.199.108.133, 185.199.109.133, 185.199.111.133
Connecting to objects.githubusercontent.com (objects.githubusercontent.com)|185.199.108.133|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 36878412 (35M) [application/octet-stream]
Saving to: ‘cni-plugins-linux-amd64-v0.8.6.tgz’
100%[=============================================================================================================================================================================================>] 36,878,412 540KB/s in 3m 12s
2023-10-08 11:55:26 (188 KB/s) - ‘cni-plugins-linux-amd64-v0.8.6.tgz’ saved [36878412/36878412]
FINISHED --2023-10-08 11:55:26--
Total wall clock time: 3m 31s
Downloaded: 1 files, 35M in 3m 12s (188 KB/s)
[root@k8s-master flannel]#
[root@k8s-master flannel]# tar zxvf cni-plugins-linux-amd64-v0.8.6.tgz
./
./flannel
./ptp
./host-local
./firewall
./portmap
./tuning
./vlan
./host-device
./bandwidth
./sbr
./static
./dhcp
./ipvlan
./macvlan
./loopback
./bridge
[root@k8s-master flannel]#
[root@k8s-master flannel]# cp flannel /opt/cni/bin/
After the plugin is copied into place, coredns starts normally.
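Note that the plugin binaries must exist on every node that runs pods, so repeat the copy on k8s-node1 and k8s-node2 (the pod name below is from this cluster's transcript; substitute your own), then delete the stuck pod so its ReplicaSet recreates it:
scp /opt/cni/bin/flannel root@192.168.200.142:/opt/cni/bin/
scp /opt/cni/bin/flannel root@192.168.200.143:/opt/cni/bin/
kubectl -n kube-system delete pod coredns-bccdc95cf-f4cvr   # recreated automatically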