Installing Kubernetes with kubeadm

System initialization

Disable the firewall

	systemctl stop firewalld
	systemctl disable firewalld

Disable SELinux

	 sed -i 's/enforcing/disabled/' /etc/selinux/config   # permanent
	 setenforce 0                                         # temporary
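
A quick check: getenforce should now report Permissive (Disabled after the next reboot):

	 getenforce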

Disable swap

	 swapoff -a                             # temporary
	 sed -ri 's/.*swap.*/#&/' /etc/fstab    # permanent
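
To confirm swap is off, free should show 0 for swap:

	 free -h | grep -i -i swap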

Set the hostname

	 hostnamectl set-hostname <hostname>
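
For example, matching the hosts entries added below:

	 hostnamectl set-hostname master01   # on 172.172.88.51
	 hostnamectl set-hostname master02   # on 172.172.88.52
	 hostnamectl set-hostname master03   # on 172.172.88.53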

Add hosts entries on every node

	cat >> /etc/hosts << EOF
	172.172.88.51 master01
	172.172.88.52 master02
	172.172.88.53 master03
	EOF

Pass bridged IPv4 traffic to the iptables chains

	cat > /etc/sysctl.d/k8s.conf << EOF
	net.ipv4.ip_forward = 1
	net.bridge.bridge-nf-call-ip6tables = 1
	net.bridge.bridge-nf-call-iptables = 1
	EOF
	sysctl --system 
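
To verify the keys were applied (if sysctl reports the net.bridge.* keys as unknown, load br_netfilter first, as done in the next step, and re-run sysctl --system):

	sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward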

Prerequisites for enabling IPVS in kube-proxy

yum -y install ipvsadm
modprobe br_netfilter
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
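
Note: on kernel 4.19 and later, nf_conntrack_ipv4 was merged into nf_conntrack; if the modprobe above fails, load and check that module instead:

modprobe -- nf_conntrack
lsmod | grep nf_conntrack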

Install Docker (all nodes)

Install Docker dependencies

yum install -y yum-utils device-mapper-persistent-data lvm2

Add the repo and install Docker

yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum list | grep docker-ce
yum -y install docker-ce

Configure the Docker daemon

cat  > /etc/docker/daemon.json  <<EOF
{
        "exec-opts": ["native.cgroupdriver=systemd"],
        "log-driver": "json-file",
        "log-opts": {
          "max-size": "100m"
        },
        "registry-mirrors": ["https://b9pmyelo.mirror.aliyuncs.com"]
}
EOF
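
The native.cgroupdriver=systemd line matters: kubelet and Docker must use the same cgroup driver, and kubeadm recommends systemd. Once Docker is running, you can confirm it took effect:

docker info | grep -i cgroup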

Move the Docker data directory

mkdir /opt/docker_data
vim /usr/lib/systemd/system/docker.service
# append --graph=/opt/docker_data to the ExecStart line, so it reads:
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --graph=/opt/docker_data

Start Docker

 systemctl daemon-reload && systemctl enable docker && systemctl start docker

Install keepalived (Alibaba Cloud)

Request an HaVip (high-availability virtual IP)

First, request an HaVip in the Alibaba Cloud console and bind it to the ECS instances.

Install keepalived (all three master nodes)

yum -y install keepalived 

Edit keepalived.conf

! Configuration File for keepalived
global_defs {
   notification_email {
     acassen@firewall.loc
     failover@firewall.loc
     sysadmin@firewall.loc
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
   vrrp_skip_check_adv_addr
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}
#vrrp_script checkhaproxy {
#    script "/etc/keepalived/do_sth.sh"
#    interval 5
#}
vrrp_instance VI_1 {
    state MASTER                   # mark this ECS instance as the master
    interface eth0                 # NIC name; eth0 in this example
    virtual_router_id 51
    nopreempt
    #preempt_delay 10
    priority 100                   # higher number = higher priority; the master uses 100 here
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    unicast_src_ip 172.172.88.51   # private IP of this ECS instance
    unicast_peer {
        172.172.88.52              # private IPs of the peer ECS instances
        172.172.88.53
    }
    virtual_ipaddress {
        172.172.88.54              # the HaVip address
    }
    notify_master "/etc/keepalived/notify_action.sh MASTER"
    notify_backup "/etc/keepalived/notify_action.sh BACKUP"
    notify_fault "/etc/keepalived/notify_action.sh FAULT"
    notify_stop "/etc/keepalived/notify_action.sh STOP"
    garp_master_delay 1
    garp_master_refresh 5
    track_interface {
        eth0                       # NIC name of the ECS instance; eth0 in this example
    }
#    track_script {
#        checkhaproxy
#    }
}

Start keepalived

systemctl start keepalived
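
On the instance currently holding MASTER state, the HaVip should now be attached to eth0:

ip addr show eth0 | grep 172.172.88.54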

Install Kubernetes

Add the Aliyun Kubernetes repo

cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

Install kubeadm, kubelet, and kubectl (all three master nodes)

yum -y install kubeadm-1.20.10-0  kubelet-1.20.10-0  kubectl-1.20.10-0
systemctl enable kubelet 

List the images kubeadm needs

kubeadm config images list

Pull the images from the Aliyun mirror

#!/bin/bash

set -e

KUBE_VERSION=v1.20.15
KUBE_PAUSE_VERSION=3.2
ETCD_VERSION=3.4.13-0
CORE_DNS_VERSION=1.7.0

GCR_URL=k8s.gcr.io
ALIYUN_URL=registry.cn-hangzhou.aliyuncs.com/google_containers

images=(kube-proxy:${KUBE_VERSION}
kube-scheduler:${KUBE_VERSION}
kube-controller-manager:${KUBE_VERSION}
kube-apiserver:${KUBE_VERSION}
pause:${KUBE_PAUSE_VERSION}
etcd:${ETCD_VERSION}
coredns:${CORE_DNS_VERSION})

for imageName in ${images[@]} ; do
	docker pull $ALIYUN_URL/$imageName
	docker tag  $ALIYUN_URL/$imageName $GCR_URL/$imageName
	docker rmi $ALIYUN_URL/$imageName
done
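
A typical way to use it (the script name pull-images.sh is just an example): run it on every master, then confirm the retagged images are present:

bash pull-images.sh
docker images | grep k8s.gcr.io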

Print and edit the default init configuration

kubeadm config print init-defaults > kubeadm-config.yaml

apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 1.2.3.4    # change to this node's IP or the VIP
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: master01
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
kubernetesVersion: v1.20.15
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"     ## Pod CIDR; add this line yourself
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---                              ## enable IPVS; add manually (before v1.19)
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs

## enable IPVS; add manually (v1.19 and later)
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs

Initialize the cluster

kubeadm init --config=kubeadm-config.yaml | tee init.log
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
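
If kube-proxy started in ipvs mode, ipvsadm (installed earlier) should already list virtual servers for the service network:

ipvsadm -Ln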

Install flannel

wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl apply -f kube-flannel.yml
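
Each node runs one flannel pod; nodes move from NotReady to Ready once flannel is up:

kubectl get pods -A | grep flannel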

Cluster health check

kubectl get cs
NAME                 STATUS      MESSAGE                                                                                       ERROR
scheduler            Unhealthy   Get "http://127.0.0.1:10251/healthz": dial tcp 127.0.0.1:10251: connect: connection refused
controller-manager   Unhealthy   Get "http://127.0.0.1:10252/healthz": dial tcp 127.0.0.1:10252: connect: connection refused
etcd-0               Healthy     {"health":"true"}

The errors above occur because the scheduler and controller-manager manifests set the default port to 0; the fix is to comment out the corresponding --port flag:
cd /etc/kubernetes/manifests/
vim kube-controller-manager.yaml
#    - --port=0   # line 26
vim kube-scheduler.yaml
#    - --port=0   # line 19

Run the check again:
kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok                  
scheduler            Healthy   ok                  
etcd-0               Healthy   {"health":"true"}   

If the status has not recovered after a while, restart kubelet:
systemctl restart kubelet.service

Join the other master nodes to the cluster

View and create tokens

Check node status:
kubectl get nodes
List existing tokens:
kubeadm token list
Create a token:
kubeadm token create
Create a never-expiring token (a security risk):
kubeadm token create --ttl 0

Edit the kubeadm-config ConfigMap

kubectl -n kube-system edit cm kubeadm-config
Below the clusterName: kubernetes line, add:
controlPlaneEndpoint: <cluster VIP>:6443

Without this setting, additional masters will fail to join the cluster.
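
After the edit, the relevant part of the ClusterConfiguration should read (using the HaVip from this setup):

clusterName: kubernetes
controlPlaneEndpoint: 172.172.88.54:6443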

Compute the CA certificate hash

openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'

Generate the join command (for joining as a worker node)

kubeadm token create --print-join-command

Upload the control-plane certificates (only needed when joining as a master)

kubeadm init phase upload-certs --upload-certs 

Run on the joining node (here, as a master)

kubeadm join <cluster VIP>:6443 --token yunooj.hifya8wl8hq662op --discovery-token-ca-cert-hash sha256:a7e7bfac430d6efa52223dc6f7efb50f5e8287c2e886d9b8329f47e2de41f07d --control-plane --certificate-key 6ab963ed97ca8987ef5ac4f06fecd0ac6936c3ed51d6fea0286f69574914d305
# This command combines the output of 'kubeadm token create --print-join-command' and 'kubeadm init phase upload-certs --upload-certs'
# The --control-plane and --certificate-key flags make the node join as a master

Check node status

kubectl get nodes
NAME       STATUS   ROLES                  AGE    VERSION
master01   Ready    control-plane,master   4d3h   v1.20.10
master02   Ready    control-plane,master   4d     v1.20.10
master03   Ready    control-plane,master   4d     v1.20.10

Taints

View a node's taints

kubectl describe node master01 | grep -C 5 -i taint

Add a taint

Syntax:
kubectl taint node [node] key=value:[effect]
 	where [effect] is one of: [ NoSchedule | PreferNoSchedule | NoExecute ]
  	NoSchedule: pods will never be scheduled onto the node
  	PreferNoSchedule: the scheduler avoids the node when possible
  	NoExecute: new pods are not scheduled, and existing pods are evicted
Examples:
kubectl taint node node1 key1=value1:NoSchedule
kubectl taint node node1 key1=value1:NoExecute
kubectl taint node node1 key2=value2:NoSchedule

Remove a taint

Examples:
kubectl taint node node1 key1:NoSchedule-  # no value is needed when removing
kubectl taint node node1 key1:NoExecute-
kubectl taint node node1 key1-             # removes every effect for key1
kubectl taint node node1 key2:NoSchedule-

Add the master taint

kubectl taint nodes master01 node-role.kubernetes.io/master=:NoSchedule
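
Conversely, to let ordinary workloads schedule onto a master, remove the taint (note the trailing dash):

kubectl taint nodes master01 node-role.kubernetes.io/master-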