
Rapid deployment of a Kubernetes 1.27.1 cluster with kubeadm

1. Cluster node preparation

1.1 Host operating system

No. | OS and version | Notes
1   | CentOS 7u9     |

1.2 Host hardware configuration

CPU | Memory | Disk   | Role          | Hostname
8C  | 8G     | 1024GB | master        | k8s-master01
8C  | 16G    | 1024GB | worker (node) | k8s-worker01
8C  | 16G    | 1024GB | worker (node) | k8s-worker02

1.3 Host configuration

1.3.1 Hostname configuration

(Use the internal/private IP addresses.)
This deployment uses 3 hosts: 1 master node named k8s-master01 and 2 worker nodes named k8s-worker01 and k8s-worker02.

Master node
# hostnamectl set-hostname k8s-master01
Worker01 node
# hostnamectl set-hostname k8s-worker01
Worker02 node
# hostnamectl set-hostname k8s-worker02

1.3.2 Host IP address configuration

The k8s-master01 node IP address is 192.168.10.160/24
# vim /etc/sysconfig/network-scripts/ifcfg-ens33
TYPE="Ethernet"
PROXY_METHOD="none"
BROWSER_ONLY="no"
BOOTPROTO="none"
DEFROUTE="yes"
IPV4_FAILURE_FATAL="no"
IPV6INIT="yes"
IPV6_AUTOCONF="yes"
IPV6_DEFROUTE="yes"
IPV6_FAILURE_FATAL="no"
IPV6_ADDR_GEN_MODE="stable-privacy"
NAME="ens33"
DEVICE="ens33"
ONBOOT="yes"
IPADDR="192.168.10.160"
PREFIX="24"
GATEWAY="192.168.10.2"
DNS1="119.29.29.29"
The k8s-worker01 node IP address is 192.168.10.161/24
# vim /etc/sysconfig/network-scripts/ifcfg-ens33
TYPE="Ethernet"
PROXY_METHOD="none"
BROWSER_ONLY="no"
BOOTPROTO="none"
DEFROUTE="yes"
IPV4_FAILURE_FATAL="no"
IPV6INIT="yes"
IPV6_AUTOCONF="yes"
IPV6_DEFROUTE="yes"
IPV6_FAILURE_FATAL="no"
IPV6_ADDR_GEN_MODE="stable-privacy"
NAME="ens33"
DEVICE="ens33"
ONBOOT="yes"
IPADDR="192.168.10.161"
PREFIX="24"
GATEWAY="192.168.10.2"
DNS1="119.29.29.29"
The k8s-worker02 node IP address is 192.168.10.162/24
# vim /etc/sysconfig/network-scripts/ifcfg-ens33
TYPE="Ethernet"
PROXY_METHOD="none"
BROWSER_ONLY="no"
BOOTPROTO="none"
DEFROUTE="yes"
IPV4_FAILURE_FATAL="no"
IPV6INIT="yes"
IPV6_AUTOCONF="yes"
IPV6_DEFROUTE="yes"
IPV6_FAILURE_FATAL="no"
IPV6_ADDR_GEN_MODE="stable-privacy"
NAME="ens33"
DEVICE="ens33"
ONBOOT="yes"
IPADDR="192.168.10.162"
PREFIX="24"
GATEWAY="192.168.10.2"
DNS1="119.29.29.29"
1.3.3 Hostname and IP address resolution

Configure this on all cluster hosts.

# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.10.160 k8s-master01
192.168.10.161 k8s-worker01
192.168.10.162 k8s-worker02
1.3.4 Firewall configuration

Perform this on all hosts.

Disable the existing firewalld firewall
# systemctl disable firewalld
# systemctl stop firewalld
# firewall-cmd --state
not running
1.3.5 SELinux configuration

Perform this on all hosts. Changing the SELinux configuration requires a reboot of the operating system.

# sed -ri 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
# sestatus
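To disable SELinux immediately without rebooting (the configuration change above only takes effect on the next boot), it can also be switched to permissive mode at runtime:
# setenforce 0
# getenforce
Permissive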
1.3.6 Time synchronization

Perform this on all hosts. On a minimal installation, the ntpdate package must be installed.

# crontab -e
0 */1 * * * /usr/sbin/ntpdate time1.aliyun.com
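On a minimal installation, a short sketch of installing ntpdate and running one manual synchronization before relying on the cron job:
# yum -y install ntpdate
# ntpdate time1.aliyun.com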
1.3.7 Upgrade the operating system kernel

Perform this on all hosts.

Import the elrepo GPG key
# rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
Install the elrepo YUM repository
# yum -y install https://www.elrepo.org/elrepo-release-7.0-4.el7.elrepo.noarch.rpm
Install the kernel-lt package (lt is the long-term support branch; ml is the mainline branch)
# yum --enablerepo="elrepo-kernel" -y install kernel-lt.x86_64
Set the default grub2 boot entry to 0
# grub2-set-default 0
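To confirm which menu entry index corresponds to the newly installed kernel before relying on index 0, the grub menu entries can be listed first (the same command is used in the sealos-based guide later in this document):
# awk -F \' '$1=="menuentry " {print i++ " : " $2}' /etc/grub2.cfg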
Regenerate the grub2 configuration file
# grub2-mkconfig -o /boot/grub2/grub.cfg
After the update, reboot so that the upgraded kernel takes effect.
# reboot
After rebooting, verify that the running kernel is the upgraded version
# uname -r
1.3.8 Configure kernel IP forwarding and bridge filtering

Perform this on all hosts.

Add the bridge-filtering and kernel-forwarding configuration file
# cat /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
Load the br_netfilter module
# modprobe br_netfilter
Check that it is loaded
# lsmod | grep br_netfilter
br_netfilter           22256  0
bridge                151336  1 br_netfilter
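The k8s.conf file above is not applied automatically; to load all sysctl configuration so the settings take effect immediately:
# sysctl --system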
1.3.9 Install ipset and ipvsadm

Perform this on all hosts.

Install ipset and ipvsadm
# yum -y install ipset ipvsadm
Configure how the IPVS modules are loaded
Add the modules that need to be loaded
# cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
Make the script executable, run it, and check that the modules are loaded
# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack
1.3.10 Disable the swap partition

Reboot the operating system after the change; if you do not reboot, swap can be disabled temporarily with swapoff -a

Permanently disabling the swap partition requires a reboot of the operating system
# vim /etc/fstab
......
# /dev/mapper/centos-swap swap                    swap    defaults        0 0
Add a # at the beginning of this line to comment it out
free -m  # check that the swap row shows 0 everywhere; if so, swap has been disabled successfully
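As an alternative to editing /etc/fstab by hand, a one-liner (a sketch; check the result before rebooting) comments out any swap entry and disables swap for the current boot:
# sed -ri 's/.*swap.*/#&/' /etc/fstab
# swapoff -a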

2. Docker preparation

2.1 Prepare the Docker YUM repository

Use the Alibaba Cloud open source mirror site.

# wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo

2.2 Install Docker

# yum -y install docker-ce

2.3 Start the Docker service

# systemctl enable --now docker

2.4 Change the cgroup driver

/etc/docker/daemon.json does not exist by default; it must be created

Add the following content to /etc/docker/daemon.json

# cat /etc/docker/daemon.json
{
        "exec-opts": ["native.cgroupdriver=systemd"]
}
# systemctl restart docker
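After the restart, confirm that Docker is now using the systemd cgroup driver:
# docker info | grep -i cgroup
 Cgroup Driver: systemd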

2.5 Install cri-dockerd

# wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.1/cri-dockerd-0.3.1-3.el7.x86_64.rpm
# yum install cri-dockerd-0.3.1-3.el7.x86_64.rpm
# vim /usr/lib/systemd/system/cri-docker.service

Modify line 10 as follows
ExecStart=/usr/bin/cri-dockerd --pod-infra-container-image=registry.k8s.io/pause:3.9 --container-runtime-endpoint fd://
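After editing the unit file, reload systemd so the modified ExecStart is picked up before starting the service:
# systemctl daemon-reload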
# systemctl start cri-docker
# systemctl enable cri-docker

3. Kubernetes 1.27.X cluster deployment

3.1 Cluster software and versions

Component    | kubeadm                             | kubelet                                                                  | kubectl
Version      | 1.27.X                              | 1.27.X                                                                   | 1.27.X
Installed on | all cluster hosts                   | all cluster hosts                                                        | all cluster hosts
Purpose      | initializes and manages the cluster | receives instructions from the api-server and manages the pod lifecycle | command-line tool for managing cluster applications

3.2 Prepare the Kubernetes YUM repository

3.2.1 Google YUM repository
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
        https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
3.2.2 Alibaba Cloud YUM repository
# cat > /etc/yum.repos.d/k8s.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

3.3 Install the cluster software

Install on all nodes

Default installation (latest version)
# yum -y install  kubeadm  kubelet kubectl
List the available versions
# yum list kubeadm.x86_64 --showduplicates | sort -r
# yum list kubelet.x86_64 --showduplicates | sort -r
# yum list kubectl.x86_64 --showduplicates | sort -r
Install a specific version
# yum -y install  kubeadm-1.27.X-0  kubelet-1.27.X-0 kubectl-1.27.X-0
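For example, to pin to the 1.27.1 release deployed later in this guide (assuming that exact package build is available in the configured repository):
# yum -y install kubeadm-1.27.1-0 kubelet-1.27.1-0 kubectl-1.27.1-0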

3.4 Configure kubelet

To keep the cgroup driver used by kubelet consistent with the one used by Docker, it is recommended to modify the following file.

# vim /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"
Just enable kubelet to start at boot; since no configuration file has been generated yet, it starts automatically after cluster initialization
# systemctl enable kubelet

# systemctl status kubelet    # check whether the service is running

# systemctl start kubelet
# systemctl restart kubelet

3.5 Prepare the cluster images

A VPN can be used to download the images.

# kubeadm config images list --kubernetes-version=v1.27.X

In a LAN (internal network) environment, use the following script

# cat image_download.sh
#!/bin/bash
images_list='<list of images>'

for i in $images_list
do
        docker pull $i
done

docker save -o k8s-1-27-X.tar $images_list
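One way to populate the images_list variable in the script above (a sketch, assuming kubeadm is already installed and the host running the script can reach the registry) is to capture the list printed by kubeadm:
# images_list=$(kubeadm config images list --kubernetes-version=v1.27.1)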
When pulling images with kubeadm, add the --cri-socket option

kubeadm config images pull --cri-socket unix:///var/run/cri-dockerd.sock


3.6 Cluster initialization

[root@k8s-master01 ~]# kubeadm init --kubernetes-version=v1.27.X --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=192.168.10.160  --cri-socket unix:///var/run/cri-dockerd.sock
If the --cri-socket option is not added, the following error is reported:
Found multiple CRI endpoints on the host. Please define which one do you wish to use by setting the 'criSocket' field in the kubeadm configuration file: unix:///var/run/containerd/containerd.sock, unix:///var/run/cri-dockerd.sock
To see the stack trace of this error execute with --v=5 or higher

To reset after a failed initialization, run the following command
kubeadm reset  --cri-socket unix:///var/run/cri-dockerd.sock

Flush the iptables tables (clear all the rules)
iptables  -t filter -F && iptables  -t nat -F  && iptables -t mangle -F && iptables -t raw -F && iptables -X
View the kubelet logs
journalctl -xeu kubelet
Output of the initialization process
[init] Using Kubernetes version: v1.27.1
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master01 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.10.160]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master01 localhost] and IPs [192.168.10.160 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master01 localhost] and IPs [192.168.10.160 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 13.006785 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node k8s-master01 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node k8s-master01 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule node-role.kubernetes.io/control-plane:NoSchedule]
[bootstrap-token] Using token: 8x4o2u.hslo8xzwwlrncr8s
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.10.160:6443 --token 8x4o2u.hslo8xzwwlrncr8s \
        --discovery-token-ca-cert-hash sha256:7323a8b0658fc33d89e627f078f6eb16ac94394f9a91b3335dd3ce73a3f313a0

3.7 Prepare the kubeconfig file for managing the cluster

# Every node that runs kubectl needs this config file; if it is missing, copy it from the master node to the same path (see the sketch below)
[root@k8s-master01 ~]# mkdir -p $HOME/.kube
[root@k8s-master01 ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master01 ~]# chown $(id -u):$(id -g) $HOME/.kube/config
[root@k8s-master01 ~]# ls /root/.kube/
config
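To copy the kubeconfig to a worker node, a sketch using the hostnames from this guide (requires SSH access as root; adjust as needed):
[root@k8s-master01 ~]# ssh k8s-worker01 "mkdir -p /root/.kube"
[root@k8s-master01 ~]# scp /etc/kubernetes/admin.conf k8s-worker01:/root/.kube/config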

Join the worker nodes to the cluster

kubeadm join 192.168.18.217:6443 --token 9bf1ic.yhc935n9h8dswrxq \
	--discovery-token-ca-cert-hash sha256:3bb003c17e7b8b9c294f5644800f3cca2dba52764e0febb60e0023419504d26a --cri-socket unix:///var/run/cri-dockerd.sock
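If the bootstrap token printed by kubeadm init has expired (tokens are valid for 24 hours by default), a fresh join command can be generated on the master; when using cri-dockerd, remember to append --cri-socket unix:///var/run/cri-dockerd.sock to the printed command:
[root@k8s-master01 ~]# kubeadm token create --print-join-command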


3.8 Deploy the cluster network plugin (calico)

Use calico to provide the cluster network

Installation reference: https://projectcalico.docs.tigera.io/about/about-calico

Apply the operator manifest
[root@k8s-master01 ~]# kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.25.1/manifests/tigera-operator.yaml
Install via custom resources
[root@k8s-master01 ~]# wget https://raw.githubusercontent.com/projectcalico/calico/v3.25.1/manifests/custom-resources.yaml
Modify line 13 of the file to use the address range that was passed to kubeadm init via --pod-network-cidr
[root@k8s-master01 ~]# vim custom-resources.yaml
......
 11     ipPools:
 12     - blockSize: 26
 13       cidr: 10.244.0.0/16 
 14       encapsulation: VXLANCrossSubnet
......
Apply the manifest
[root@k8s-master01 ~]# kubectl create -f custom-resources.yaml
Watch the pods in the calico-system namespace
[root@k8s-master01 ~]# watch kubectl get pods -n calico-system

Wait until each pod has the STATUS of Running.

All pods are now running
[root@k8s-master01 ~]# kubectl get pods -n calico-system
NAME                                      READY   STATUS    RESTARTS   AGE
calico-kube-controllers-666bb9949-dzp68   1/1     Running   0          11m
calico-node-jhcf4                         1/1     Running   4          11m
calico-typha-68b96d8d9c-7qfq7             1/1     Running   2          11m
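As an alternative to watching manually, the following convenience sketch blocks until every pod in the namespace reports Ready:
[root@k8s-master01 ~]# kubectl wait --for=condition=Ready pods --all -n calico-system --timeout=600s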

3.9 Add worker nodes to the cluster

Because container images can download slowly, errors may occur; the most common one is that the CNI (cluster network plugin) is not ready yet. If the network connection works, just wait patiently.

[root@k8s-worker01 ~]# kubeadm join 192.168.10.160:6443 --token 8x4o2u.hslo8xzwwlrncr8s \
        --discovery-token-ca-cert-hash sha256:7323a8b0658fc33d89e627f078f6eb16ac94394f9a91b3335dd3ce73a3f313a0 --cri-socket unix:///var/run/cri-dockerd.sock
[root@k8s-worker02 ~]# kubeadm join 192.168.10.160:6443 --token 8x4o2u.hslo8xzwwlrncr8s \
        --discovery-token-ca-cert-hash sha256:7323a8b0658fc33d89e627f078f6eb16ac94394f9a91b3335dd3ce73a3f313a0 --cri-socket unix:///var/run/cri-dockerd.sock

4. Verify cluster availability

List all nodes
[root@k8s-master01 ~]# kubectl get nodes
NAME           STATUS   ROLES           AGE   VERSION
k8s-master01   Ready    control-plane   12h   v1.27.1
k8s-worker01   Ready    <none>          12h   v1.27.1
k8s-worker02   Ready    <none>          12h   v1.27.1
Check cluster health
[root@k8s-master01 ~]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE                         ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-0               Healthy   {"health":"true","reason":""}
Check the Kubernetes cluster pods
[root@k8s-master01 ~]# kubectl get pods -n kube-system
NAME                                   READY   STATUS    RESTARTS   AGE
coredns-6d4b75cb6d-js5pl               1/1     Running   0          12h
coredns-6d4b75cb6d-zm8pc               1/1     Running   0          12h
etcd-k8s-master01                      1/1     Running   0          12h
kube-apiserver-k8s-master01            1/1     Running   0          12h
kube-controller-manager-k8s-master01   1/1     Running   0          12h
kube-proxy-7nhr7                       1/1     Running   0          12h
kube-proxy-fv4kr                       1/1     Running   0          12h
kube-proxy-vv5vg                       1/1     Running   0          12h
kube-scheduler-k8s-master01            1/1     Running   0          12h
Check the pods in the calico-system namespace again.
[root@k8s-master01 ~]# kubectl get pods -n calico-system
NAME                                       READY   STATUS    RESTARTS   AGE
calico-kube-controllers-5b544d9b48-xgfnk   1/1     Running   0          12h
calico-node-7clf4                          1/1     Running   0          12h
calico-node-cjwns                          1/1     Running   0          12h
calico-node-hhr4n                          1/1     Running   0          12h
calico-typha-6cb6976b97-5lnpk              1/1     Running   0          12h
calico-typha-6cb6976b97-9w9s8              1/1     Running   0          12h

Quickly installing a Kubernetes cluster with sealos

1. Environment preparation

Prepare four clean virtual machines (no previous k8s installation): one master and three workers.

# All hosts must have a hostname and host-to-IP mapping configured
# Set the hostname
hostnamectl set-hostname k8s-master01

# vim /etc/hosts
192.168.65.130 k8s-master01
192.168.65.140 k8s-worker01
192.168.65.141 k8s-worker02
192.168.65.142 k8s-worker03

2. Install sealos

Official documentation: https://sealos.io/zh-Hans/docs/self-hosting/lifecycle-management/quick-start/deploy-kubernetes
Download: https://github.com/labring/sealos/releases

# Installation environment: CentOS 7
# Download sealos
wget https://github.com/labring/sealos/releases/download/v4.3.2/sealos_4.3.2_linux_amd64.rpm

# Install sealos
yum install sealos_4.3.2_linux_amd64.rpm

3. Install the Kubernetes cluster

Official documentation: https://sealos.io/zh-Hans/docs/self-hosting/lifecycle-management/quick-start/deploy-kubernetes

Installation method 1

# Install the Kubernetes cluster
sealos run labring/kubernetes:v1.27.5 labring/helm:v3.12.3 labring/calico:3.26.1 \
--masters 192.168.65.130  \
--nodes 192.168.65.140,192.168.65.141,192.168.65.142 -p [your-ssh-passwd]

Note: labring/helm should come before labring/calico.
Parameter description:

Parameter     | Example value              | Description
--masters     | 192.168.0.2                | list of Kubernetes master node addresses
--nodes       | 192.168.0.3                | list of Kubernetes node addresses
--passwd (-p) | [your-ssh-passwd]          | SSH login password
kubernetes    | labring/kubernetes:v1.25.0 | Kubernetes image

Run the command above directly on clean servers, without any extra steps, to bring up a highly available Kubernetes cluster.

After a successful installation, output like the following is shown:
Verify that k8s was installed successfully

Installation method 2

# Generate the configuration file
sealos gen labring/kubernetes:v1.27.5 labring/helm:v3.12.3 labring/calico:3.26.1 \
      labring/cert-manager:v1.12.3  labring/openebs:v3.7.0 \
--masters 192.168.65.130 \
--nodes 192.168.65.140,192.168.65.141,192.168.65.142 -p root > Clusterfile 

# Apply the configuration file
sealos apply -f Clusterfile

Install other components

sealos run labring/openebs:v3.7.0
sealos run labring/cert-manager:v1.12.3
sealos run labring/minio-operator:v4.5.5 labring/ingress-nginx:v1.8.1 \
        labring/mysql-operator:8.0.27-18.1 labring/redis-operator:v1.2.4

Adding nodes
Add node (worker) nodes:

$ sealos add --nodes 192.168.65.21:22,192.168.65.19:22 

Add master nodes:

$ sealos add --masters 192.168.65.145:22,192.168.65.146:22

Removing nodes
Remove node (worker) nodes:

$ sealos delete --nodes 192.168.65.21:22,192.168.65.19:22  

Remove master nodes:

$ sealos delete --masters 192.168.65.145:22,192.168.65.146:22

Clean up the cluster

$ sealos reset

The kubeasz approach

Single node (all-in-one)

https://github.com/easzlab/kubeasz/blob/master/docs/setup/quickStart.md
Version 3.5.0 is no longer maintained; use 3.6.4 instead

export release=3.6.4
wget https://github.com/easzlab/kubeasz/releases/download/${release}/ezdown
chmod +x ./ezdown

Download the kubeasz code, binaries, and default container images

# For networks inside mainland China
./ezdown -D
# For networks outside mainland China
#./ezdown -D -m standard

[Optional] Download additional container images (cilium, flannel, prometheus, etc.)

# Download as needed
./ezdown -X flannel
./ezdown -X prometheus
...

[Optional] Download offline system packages (for environments where yum/apt repositories cannot be used)

./ezdown -P

After the script above runs successfully, all files (kubeasz code, binaries, offline images) are placed under the /etc/kubeasz directory

  • /etc/kubeasz contains the kubeasz release code for version ${release}
  • /etc/kubeasz/bin contains binaries such as k8s/etcd/docker/cni
  • /etc/kubeasz/down contains the offline container images needed for cluster installation
  • /etc/kubeasz/down/packages contains the basic system packages needed for cluster installation

Run kubeasz in a container

./ezdown -S

Install an all-in-one (aio) cluster with the default configuration

docker exec -it kubeasz ezctl start-aio
# If the installation fails, check the logs to troubleshoot, then reinstall the aio cluster with the following command
# docker exec -it kubeasz ezctl setup default all

Verify the installation

$ source ~/.bashrc
$ kubectl version         # verify the cluster version
$ kubectl get node        # verify the nodes are Ready
$ kubectl get pod -A      # verify cluster pod status; the network plugin, coredns, metrics-server, etc. are installed by default
$ kubectl get svc -A      # verify cluster service status

Cleanup
Feel free to experiment with the K8s development/test environment created above; when you run into errors, try to resolve them by checking logs, searching online, or filing issues. You can of course also tear the cluster down and recreate it.

On the host machine, clean up as follows

  • Destroy the cluster: docker exec -it kubeasz ezctl destroy default
  • Reboot the node to make sure leftover virtual NICs, routes, and other residue are cleaned up

Multi-node cluster

https://github.com/easzlab/kubeasz/blob/master/docs/setup/00-planning_and_overall_intro.md

Deploying a Kubernetes cluster with sealos and managing it

This deployment uses 4 hosts: 3 master nodes and 1 worker node.

1. Host preparation

1.1 Configure hostnames

# hostnamectl set-hostname xxx

k8s-master01
k8s-master02
k8s-master03
k8s-worker01

1.2 Configure static IP addresses

No. | Hostname     | Host IP
1   | k8s-master01 | 192.168.10.142
2   | k8s-master02 | 192.168.10.143
3   | k8s-master03 | 192.168.10.144
4   | k8s-worker01 | 192.168.10.145
# vim /etc/sysconfig/network-scripts/ifcfg-ens33
TYPE="Ethernet"
PROXY_METHOD="none"
BROWSER_ONLY="no"
BOOTPROTO="none"
DEFROUTE="yes"
IPV4_FAILURE_FATAL="no"
IPV6INIT="yes"
IPV6_AUTOCONF="yes"
IPV6_DEFROUTE="yes"
IPV6_FAILURE_FATAL="no"
IPV6_ADDR_GEN_MODE="stable-privacy"
NAME="ens33"
UUID="ec87533a-8151-4aa0-9d0f-1e970affcdc6"
DEVICE="ens33"
ONBOOT="yes"
IPADDR="192.168.10.xxx"
PREFIX="24"
GATEWAY="192.168.10.2"
DNS1="119.29.29.29"

1.3 Configure hostname-to-IP resolution

The entries below are added by the administrator; sealos also adds hostname-to-IP mappings automatically while it runs.

# /etc/hosts
192.168.10.142 k8s-master01
192.168.10.143 k8s-master02
192.168.10.144 k8s-master03
192.168.10.145 k8s-worker01

1.4 Upgrade the kernel

rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org

yum -y install https://www.elrepo.org/elrepo-release-7.0-4.el7.elrepo.noarch.rpm

yum --enablerepo="elrepo-kernel" -y install kernel-lt.x86_64

awk -F \' '$1=="menuentry " {print i++ " : " $2}' /etc/grub2.cfg

grub2-set-default "CentOS Linux (5.4.204-1.el7.elrepo.x86_64) 7 (Core)"

reboot

2. sealos preparation

wget -c https://sealyun-home.oss-cn-beijing.aliyuncs.com/sealos-4.0/latest/sealos-amd64 -O sealos &&     chmod +x sealos && mv sealos /usr/bin
# sealos version
{"gitVersion":"4.0.0","gitCommit":"7146cfe","buildDate":"2022-06-30T14:24:31Z","goVersion":"go1.17.11","compiler":"gc","platform":"linux/amd64"}

3. Deploy the Kubernetes cluster with sealos

The Kubernetes cluster uses containerd by default

sealos run labring/kubernetes:v1.24.0 labring/calico:v3.22.1     --masters 192.168.10.142,192.168.10.143,192.168.10.144     --nodes 192.168.10.145     --passwd centos
# kubectl get nodes
NAME           STATUS   ROLES           AGE   VERSION
k8s-master01   Ready    control-plane   16h   v1.24.0
k8s-master02   Ready    control-plane   16h   v1.24.0
k8s-master03   Ready    control-plane   16h   v1.24.0
k8s-worker01   Ready    <none>          16h   v1.24.0
# kubectl get pods -n kube-system
NAME                                   READY   STATUS    RESTARTS      AGE
coredns-6d4b75cb6d-59ph5               1/1     Running   1 (15h ago)   16h
coredns-6d4b75cb6d-wz6tx               1/1     Running   1 (15h ago)   16h
etcd-k8s-master01                      1/1     Running   1 (15h ago)   16h
etcd-k8s-master02                      1/1     Running   1 (15h ago)   16h
etcd-k8s-master03                      1/1     Running   1 (15h ago)   16h
kube-apiserver-k8s-master01            1/1     Running   3 (15h ago)   16h
kube-apiserver-k8s-master02            1/1     Running   1 (15h ago)   16h
kube-apiserver-k8s-master03            1/1     Running   1 (15h ago)   16h
kube-controller-manager-k8s-master01   1/1     Running   3 (15h ago)   16h
kube-controller-manager-k8s-master02   1/1     Running   1 (15h ago)   16h
kube-controller-manager-k8s-master03   1/1     Running   1 (15h ago)   16h
kube-proxy-5l26r                       1/1     Running   1 (15h ago)   16h
kube-proxy-cfbkh                       1/1     Running   1 (15h ago)   16h
kube-proxy-g92fs                       1/1     Running   1 (15h ago)   16h
kube-proxy-zsjxv                       1/1     Running   1 (15h ago)   16h
kube-scheduler-k8s-master01            1/1     Running   3 (15h ago)   16h
kube-scheduler-k8s-master02            1/1     Running   1 (15h ago)   16h
kube-scheduler-k8s-master03            1/1     Running   1 (15h ago)   16h
kube-sealyun-lvscare-k8s-worker01      1/1     Running   1 (15h ago)   16h

4. Manage the k8s cluster with kuboard

No. | Hostname       | Host IP
1   | kuboard-server | 192.168.10.146

4.1 Deploy and access kuboard

wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
yum -y install docker-ce
systemctl enable --now docker
docker run -d   --restart=unless-stopped   --name=kuboard   -p 80:80/tcp   -p 10081:10081/tcp   -e KUBOARD_ENDPOINT="http://192.168.10.146:80"   -e KUBOARD_AGENT_SERVER_TCP_PORT="10081"   -v /root/kuboard-data:/data   eipwork/kuboard:v3

The username and password are admin and Kuboard123 respectively


4.2 Add the k8s cluster to kuboard


[root@k8s-master01 ~]# kubectl apply -f kuboard-agent.yaml

namespace/kuboard created
serviceaccount/kuboard-admin created
clusterrolebinding.rbac.authorization.k8s.io/kuboard-admin-crb created
serviceaccount/kuboard-viewer created
clusterrolebinding.rbac.authorization.k8s.io/kuboard-viewer-crb created
deployment.apps/kuboard-agent-du7gv7 created
deployment.apps/kuboard-agent-du7gv7-2 created
[root@k8s-master01 ~]# kubectl get pods -n kuboard
NAME                                      READY   STATUS    RESTARTS   AGE
kuboard-agent-du7gv7-2-84f65f77b8-rcb4x   1/1     Running   0          54s
kuboard-agent-du7gv7-56c7cb9564-m78qx     1/1     Running   0          54s


Kube-install

https://blog.csdn.net/qq_16038125/article/details/128272302

https://ghproxy.com/https://raw.githubusercontent.com/lework/kainstall/master/kainstall-centos.sh

Kubernetes cluster UI and host resource monitoring

For Kubernetes 1.24 and below

1. Purpose of the Kubernetes dashboard

  • The dashboard gives a visual overview of the resource objects running in the Kubernetes cluster
  • The dashboard can directly manage resource objects (create, delete, restart, and other operations)


2. Get the Kubernetes dashboard manifest


wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.5.1/aio/deploy/recommended.yaml

3. Modify and deploy the Kubernetes dashboard manifest

[root@k8s-master1 ~]# vi recommended.yaml
......
The content above stays unchanged

To make access from the container hosts convenient, the following Service needs the NodePort type and a nodePort added
---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort     # add this
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30000
  selector:
    k8s-app: kubernetes-dashboard

Do not comment out this certificate Secret; only earlier dashboard versions required it to be commented out.
---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque    # here

---
......

The content in between does not need to be changed

The identity bound to the dashboard login must be changed, otherwise resource information cannot be displayed
---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin   # be sure to change the original kubernetes-dashboard to cluster-admin, otherwise the UI reports errors after login
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

......

The remaining content is left unchanged for now


kubectl apply -f recommended.yaml

4. Access the Kubernetes dashboard

Access it at https://<node IP>:30000


kubectl get secret -n kubernetes-dashboard
NAME                               TYPE                                  DATA   AGE
default-token-dzr9f                kubernetes.io/service-account-token   3      3m59s
kubernetes-dashboard-certs         Opaque                                0      3m59s
kubernetes-dashboard-csrf          Opaque                                1      3m59s
kubernetes-dashboard-key-holder    Opaque                                2      3m59s
kubernetes-dashboard-token-g6pq7   kubernetes.io/service-account-token   3      3m59s   <- use this token
kubectl describe secret kubernetes-dashboard-token-g6pq7 -n kubernetes-dashboard
Name:         kubernetes-dashboard-token-g6pq7
Namespace:    kubernetes-dashboard
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: kubernetes-dashboard
              kubernetes.io/service-account.uid: 46292f26-3046-411a-a9df-eac200290722

Type:  kubernetes.io/service-account-token

Data
====
namespace:  20 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6ImVGc2xjT05uekl0MlVOZ0VCSlhHSURfOXd6WGFvVnZFZmNwREwtVk1STlEifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC10b2tlbi1nNnBxNyIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjQ2MjkyZjI2LTMwNDYtNDExYS1hOWRmLWVhYzIwMDI5MDcyMiIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDprdWJlcm5ldGVzLWRhc2hib2FyZCJ9.qJMnfpKvpZNziXfkamdtYyIHrPnwisJBjlyCg_XWHoVPs5gNouGfrYkcxUKMdP9pBa7n1TwrurL3ppZTOAJRNSGO94F7BOXIFZ8O1-Ff1LZicWQrikSXDDyyWWEWypPHBIgOTGN_HMFJnIF98JnYd8vzrVVZBfiXco6lkVOK4eTQY87FgB0iJtXWh5LITefkNJm2d8o0tn2zrVnRUZ_TYisnirJOOrlx-GzfnwlXQxdaQRxdgEHHK3-lNZli54XtjB7IwP5jaER4mQ_sMTxrEMC-If46_ftMQqKn3R6YTGTG8UP49Xji_tPp--L3RUQI7vakr0x5-Cv_y0JlKEmlog   (copy the entire token)
ca.crt:     1359 bytes


5. Monitor host resources with metrics-server

5.1 Get the metrics-server manifest


wget  https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.6.1/components.yaml

5.2 Modify the metrics-server manifest

# vim components.yaml

spec:
      containers:
      - args:
        - --cert-dir=/tmp
        - --secure-port=4443
        - --kubelet-preferred-address-types=InternalIP,InternalDNS,ExternalDNS,ExternalIP,Hostname
        - --kubelet-use-node-status-port
        - --metric-resolution=15s
        - --kubelet-insecure-tls   # add this line

5.3 Deploy the metrics-server manifest

# kubectl top nodes

error: Metrics API not available
# kubectl top pods

error: Metrics API not available
kubectl apply -f components.yaml

5.4 Verify and grant access

kubectl get pods -n kube-system
NAME                                       READY   STATUS    RESTARTS   AGE
calico-kube-controllers-7cc8dd57d9-fk777   1/1     Running   1          2d1h
calico-node-57vrc                          1/1     Running   0          2d1h
calico-node-7828d                          1/1     Running   0          2d1h
calico-node-n264t                          1/1     Running   0          2d1h
calico-node-nkxrs                          1/1     Running   0          2d1h
coredns-675db8b7cc-jp54h                   1/1     Running   0          2d1h
metrics-server-8bb87844c-4ttp9             1/1     Running   0          9m52s   <- this pod
# kubectl top nodes

Error from server (ServiceUnavailable): the server is currently unable to handle the request (get nodes.metrics.k8s.io)
kubectl create clusterrolebinding system:anonymous --clusterrole=cluster-admin --user=system:anonymous
# kubectl top nodes

NAME          CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
k8s-master1   97m          4%     2497Mi          65%
k8s-master2   133m         6%     2290Mi          60%
k8s-master3   95m          4%     2215Mi          58%
k8s-worker1   45m          2%     1062Mi          27%
# kubectl top pods

NAME              CPU(cores)   MEMORY(bytes)
nginx-web-bbh48   0m           1Mi
nginx-web-x85nl   0m           1Mi

