k8s单master集群搭建
一、初始化实验环境
192.168.1.11 master 2核4G
192.168.1.12 node1 2核4G
1、各节点操作,yum源、停用防火墙,时间同步,关闭交换空间,selinux
[root@master ~]# curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
[root@master ~]# curl -o /etc/yum.repos.d/docker-ce.repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@master ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
EOF
[root@master ~]# yum makecache fast
[root@master ~]# systemctl stop firewalld && systemctl disable firewalld
[root@master ~]# yum -y install ntpdate
[root@master ~]# ntpdate cn.pool.ntp.org
10 Dec 08:39:05 ntpdate[1170]: step time server 139.199.214.202 offset -46807.740797 sec
[root@master ~]# crontab -e
0 */1 * * * /usr/sbin/ntpdate cn.pool.ntp.org
[root@master ~]# service crond restart
[root@master ~]# swapoff -a
[root@master ~]# sed -i '/swap/s/^/#/' /etc/fstab
[root@master ~]# sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
[root@master ~]# reboot -f
2、修改内核参数,配置hosts文件
[root@master ~]# cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
[root@master ~]# modprobe br_netfilter
[root@master ~]# sysctl --system
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
[root@master ~]# echo 192.168.1.11 master >> /etc/hosts
[root@master ~]# echo 192.168.1.12 node1 >> /etc/hosts
3、配置桥接模式,开启IPVS
[root@master ~]# echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables
[root@master ~]# echo 1 >/proc/sys/net/bridge/bridge-nf-call-ip6tables
[root@master ~]# cat <<EOF >> /etc/sysctl.conf
vm.swappiness = 0
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
[root@master ~]# sysctl -p
[root@master ~]# cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in \${ipvs_modules}; do
/sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1
if [ \$? -eq 0 ]; then
/sbin/modprobe \${kernel_module}
fi
done
EOF
[root@master ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs
4、安装docker 、kubeadm、kubelet,配置自启动
[root@master ~]# yum -y install docker-ce-19.03.7-3.el7
[root@master ~]# systemctl enable docker && systemctl start docker && systemctl status docker
[root@master ~]# yum -y install kubeadm-1.18.2 kubelet-1.18.2
[root@master ~]# systemctl enable kubelet
5、导入镜像
[root@master ~]# ls *.gz
cordns.tar.gz kube-apiserver.tar.gz kube-proxy.tar.gz pause.tar.gz
etcd.tar.gz kube-controller-manager.tar.gz kube-scheduler.tar.gz
[root@master ~]# for i in `ls *.gz`;do docker load -i $i ;done
二、初始化k8s集群
1、master1节点初始化k8s集群
[root@master ~]# kubeadm init --kubernetes-version=v1.18.2 --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=192.168.1.11
### 这里没有配置镜像仓库,如果配置了仓库,可以添加仓库的参数进行初始化
### kubeadm init --image-repository=镜像地址 --kubernetes-version=v1.18.2 --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address 192.168.1.11
...
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.1.11:6443 --token 5jp17c.r7b8hilfeike1mm7 \
--discovery-token-ca-cert-hash sha256:65793aa2463d8beeeedcc524aef947a120c3af4e7172e9f1e39a4468e641edda
按照提示进行操作,此时nodes节点还未ready,因为网络插件未安装
[root@master ~]# mkdir -p $HOME/.kube
[root@master ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
[root@master ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master NotReady master 44s v1.18.2
[root@master ~]# kubectl get pods -n kube-system
coredns-7ff77c879f-j48h6 0/1 Pending 0 56s
coredns-7ff77c879f-lrb77 0/1 Pending 0 56s
2、安装网络插件calico
这里用到了镜像cni.tar.gz calico-node.tar.gz calico.yaml
[root@master ~]# vim calico.yaml
167 value: "can-reach=192.168.1.11"
181 value: "10.244.0.0/16"
kubectl apply -f calico.yaml
kubectl get nodes
NAME STATUS ROLES AGE VERSION
master Ready master 46m v1.18.2
kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-66bff467f8-dqdd6 1/1 Running 0 47m
coredns-66bff467f8-qr5zg 1/1 Running 0 47m
[root@master ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master Ready master 37m v1.18.2
3、节点加入k8s集群
这里的token 和hash在master节点init的时候有提示
[root@node1 ~]# kubeadm join 192.168.1.11:6443 --token 5jp17c.r7b8hilfeike1mm7 \
--discovery-token-ca-cert-hash sha256:65793aa2463d8beeeedcc524aef947a120c3af4e7172e9f1e39a4468e641edda
[root@master ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master Ready master 50m v1.18.2
node1 Ready <none> 24s v1.18.2
master查看token、获取token_hash值
[root@master ~]# kubeadm token list
TOKEN TTL ...
abcdef.0123456789abcdef 20h
[root@master ~]# kubeadm token delete abcdef.0123456789abcdef
[root@master ~]# kubeadm token create --ttl=0 #创建永久的token
[root@master init]# kubeadm token list
TOKEN TTL
il1smu.zavpln6h8awhujrw <forever> #token的id
[root@master ~]# openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der |openssl dgst -sha256 -hex #获取token_hash值
writing RSA key
(stdin)= aa1323b38a641009b06f44f284170d808399c431fa33fe0ee28048b28c86be16
更多推荐
所有评论(0)