k8s 1.28.2 Installation

1. Host planning

OS: CentOS 8.3
Kubernetes version: 1.28.2
Machine specs: 2 vCPU, 4 GB RAM, 100 GB disk

Host			IP
k8s-master1		192.168.1.40
k8s-master2		192.168.1.41
k8s-master3		192.168.1.42
k8s-node1		192.168.1.43
k8s-node2		192.168.1.44
k8s-node3		192.168.1.45
k8s-node4		192.168.1.46
harbor			192.168.1.47
haka-1			192.168.1.48
haka-2			192.168.1.49

2. Install CentOS 8.3

3. Initialize the operating system

# Configure the hosts file on every node
cat >> /etc/hosts <<EOF
192.168.1.40 k8s-master1
192.168.1.41 k8s-master2
192.168.1.42 k8s-master3
192.168.1.43 k8s-node1
192.168.1.44 k8s-node2
192.168.1.45 k8s-node3
192.168.1.46 k8s-node4
192.168.1.47 harbor
192.168.1.48 haka-1
192.168.1.49 haka-2
EOF

#Switch the YUM repos to the Aliyun mirror
curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-vault-8.5.2111.repo

#Install dependency packages
yum install -y wget jq psmisc vim net-tools nfs-utils telnet yum-utils device-mapper-persistent-data lvm2 git tar curl

#Disable the firewall, SELinux, and swap
systemctl disable --now firewalld
setenforce 0
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
sed -ri 's/.*swap.*/#&/' /etc/fstab
swapoff -a && sysctl -w vm.swappiness=0

#Set up passwordless SSH between the nodes
#Generate a key pair on each master and node
ssh-keygen -t rsa
#Copy the public key to every host (ssh-copy-id does not expand wildcards, so loop over the IPs)
for ip in 192.168.1.{40..49}; do ssh-copy-id -i /root/.ssh/id_rsa.pub root@$ip; done

Note: the haproxy+keepalived (haka) nodes only need the initialization steps up to this point.

#Add the Kubernetes repo
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

 
#Add the Aliyun ELRepo mirror and upgrade the kernel
cat > /etc/yum.repos.d/elrepo.repo << EOF
[elrepo-kernel]
name=elrepoyum
baseurl=https://mirrors.aliyun.com/elrepo/kernel/el8/x86_64/
enabled=1
gpgcheck=0
EOF
yum --enablerepo=elrepo-kernel install kernel-ml -y
#Or install the upstream ELRepo release package instead:
yum install -y https://www.elrepo.org/elrepo-release-8.el8.elrepo.noarch.rpm
yum --enablerepo="elrepo-kernel" -y install kernel-ml.x86_64
  	
#Boot the kernel at index 0; index 0 is the newly installed (newest) kernel entry
grub2-set-default 0
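If you want to confirm the index before setting it, grubby can list the installed kernel entries; a quick check, assuming the kernel-ml install above succeeded:

#List installed kernel entries with their indexes (index 0 should be the newest kernel)
grubby --info=ALL | grep -E '^(index|kernel)'
#After the reboot later in this section, confirm the running kernel
uname -r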

#Enable IPVS
yum install ipvsadm ipset sysstat conntrack libseccomp -y
mkdir -p /etc/modules-load.d/
cat > /etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF
systemctl restart systemd-modules-load.service
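A quick sanity check that the modules listed above actually loaded:

lsmod | grep -e ip_vs -e nf_conntrack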

#Tune kernel parameters
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
vm.overcommit_memory = 1
vm.panic_on_oom = 0
fs.inotify.max_user_watches = 89100
fs.file-max = 52706963
fs.nr_open = 52706963
net.netfilter.nf_conntrack_max = 2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
net.ipv6.conf.all.disable_ipv6 = 0
net.ipv6.conf.default.disable_ipv6 = 0
net.ipv6.conf.lo.disable_ipv6 = 0
net.ipv6.conf.all.forwarding = 1
EOF

    modprobe br_netfilter		#load the br_netfilter module
    modprobe nf_conntrack		#on current kernels the module is nf_conntrack (ip_conntrack no longer exists)
    lsmod |grep conntrack		#verify it loaded
    sysctl -p /etc/sysctl.d/k8s.conf
    
###Reboot the system so the new kernel and settings take effect
reboot

#Add a default route entry
route add default gw 192.168.1.1


4. Install Docker

4.1 Install via the official script

#Download and install Docker
curl -fsSL "https://get.docker.com/" | sh

#Create the daemon.json file
cat >/etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": [
    "https://docker.mirrors.ustc.edu.cn",
    "http://hub-mirror.c.163.com"
  ],
  "max-concurrent-downloads": 10,
  "log-driver": "json-file",
  "log-level": "warn",
  "log-opts": {
    "max-size": "10m",
    "max-file": "3"
    },
  "data-root": "/var/lib/docker"
}
EOF

#Edit docker.service, adding the private registry IP or domain
vim /lib/systemd/system/docker.service
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --insecure-registry {IP} --insecure-registry {domain}

#Restart the Docker service
systemctl daemon-reload && systemctl restart docker
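It is worth verifying that Docker really picked up the systemd cgroup driver from daemon.json, since a mismatch with the kubelet causes pods to crash-loop later:

docker info | grep -i 'cgroup driver'    #should print: Cgroup Driver: systemd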

4.2 Binary installation

# Binary package download page (pick the version you need)
https://download.docker.com/linux/static/stable/x86_64/

#Extract
tar xf docker-*.tgz 

#Copy the binaries
cp docker/* /usr/bin/

#Create the containerd service file and start it
cat >/etc/systemd/system/containerd.service <<EOF
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target
[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/bin/containerd
Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=1048576
TasksMax=infinity
OOMScoreAdjust=-999
[Install]
WantedBy=multi-user.target
EOF
systemctl enable --now containerd.service

#Prepare the docker service file
cat > /etc/systemd/system/docker.service <<EOF
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket containerd.service
[Service]
Type=notify
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
EOF

#Prepare the docker socket file
cat > /etc/systemd/system/docker.socket <<EOF
[Unit]
Description=Docker Socket for the API
[Socket]
ListenStream=/var/run/docker.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
[Install]
WantedBy=sockets.target
EOF

#Create the docker group
groupadd docker

#Start Docker
systemctl enable --now docker.socket  && systemctl enable --now docker.service

#Verify
docker info

5. Install cri-dockerd

# Kubernetes 1.24+ removed the in-tree dockershim, so cri-dockerd is required to keep using Docker as the runtime
# Download cri-dockerd
https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.7/cri-dockerd-0.3.7.amd64.tgz

# Extract cri-dockerd and copy the binary
tar -zxvf cri-dockerd-0.3.7.amd64.tgz
cp cri-dockerd/cri-dockerd  /usr/bin/
chmod +x /usr/bin/cri-dockerd

# Write the cri-docker service unit
cat >  /usr/lib/systemd/system/cri-docker.service <<EOF
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-docker.socket
 
[Service]
Type=notify
ExecStart=/usr/bin/cri-dockerd --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
 
StartLimitBurst=3
 
StartLimitInterval=60s
 
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
 
TasksMax=infinity
Delegate=yes
KillMode=process
 
[Install]
WantedBy=multi-user.target
EOF

# Write the cri-docker.socket unit
cat > /usr/lib/systemd/system/cri-docker.socket <<EOF
[Unit]
Description=CRI Docker Socket for the API
PartOf=cri-docker.service
 
[Socket]
ListenStream=%t/cri-dockerd.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
 
[Install]
WantedBy=sockets.target
EOF

# Start cri-docker
systemctl daemon-reload && systemctl enable cri-docker --now
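A quick check that cri-dockerd is up and serving the CRI socket (the socket path comes from the unit above):

systemctl is-active cri-docker.service
ls -l /var/run/cri-dockerd.sock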


6. Configure HAProxy and keepalived

#First install the load balancer components: haproxy + keepalived
yum install -y haproxy keepalived

6.1 Configure keepalived

#Back up the default configuration, then write the per-node configs below
cd /etc/keepalived/
cp -p keepalived.conf keepalived.conf.bak
6.1.1 haka-1
cat > /etc/keepalived/keepalived.conf <<EOF
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
    script_user root
    enable_script_security
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state MASTER
    interface ens192
    mcast_src_ip 192.168.1.48
    virtual_router_id 51
    priority 100
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.1.120
    }
    track_script {
        chk_apiserver
    }
}
EOF
6.1.2 haka-2
cat > /etc/keepalived/keepalived.conf <<EOF
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
    script_user root
    enable_script_security
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state MASTER
    interface ens192
    mcast_src_ip 192.168.1.49
    virtual_router_id 51
    priority 99
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.1.120
    }
    track_script {
        chk_apiserver
    }
}
EOF
6.1.3 Health-check script

cat > /etc/keepalived/check_apiserver.sh <<"EOF"
#!/bin/bash
err=0
for k in $(seq 1 3)
do
    check_code=$(pgrep haproxy)
    if [[ $check_code == "" ]]; then
        err=$(expr $err + 1)
        sleep 1
        continue
    else
        err=0
        break
    fi
done

if [[ $err != "0" ]]; then
    echo "systemctl stop keepalived"
    /usr/bin/systemctl stop keepalived
    exit 1
else
    exit 0
fi
EOF
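With enable_script_security set, keepalived refuses to run a non-executable script, and keepalived itself still needs to be started on both nodes. A short sketch, assuming the interface (ens192) and VIP from the configs above:

chmod +x /etc/keepalived/check_apiserver.sh
systemctl enable --now keepalived
#On the current MASTER, the VIP should appear on the interface
ip addr show ens192 | grep 192.168.1.120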

Test: manually stop haproxy on one node and verify the VIP fails over to the other.

6.2 Configure HAProxy

#Edit /etc/sysctl.conf identically on both nodes (allows haproxy to bind the VIP even when it is not held locally)
vim /etc/sysctl.conf
---
net.ipv4.ip_nonlocal_bind=1
---
sysctl -p 

#Update the haproxy configuration
cd /etc/haproxy/
mv haproxy.cfg haproxy.cfg.bak 
cat > /etc/haproxy/haproxy.cfg << EOF
 global
  maxconn 2000
  ulimit-n 16384
  log 127.0.0.1 local0 err
  stats timeout 30s

 defaults
  log global
  mode http
  option httplog
  timeout connect 5000
  timeout client 50000
  timeout server 50000
  timeout http-request 15s
  timeout http-keep-alive 15s

 frontend monitor-in
  bind *:33305
  mode http
  option httplog
  monitor-uri /monitor

 frontend k8s-master
  bind 192.168.1.120:16443
  mode tcp
  option tcplog
  tcp-request inspect-delay 5s
  default_backend k8s-masters

 backend k8s-masters
  mode tcp
  option tcplog
  option tcp-check
  balance roundrobin
  default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
  server  k8s-master1  192.168.1.40:6443 check
  server  k8s-master2  192.168.1.41:6443 check
  server  k8s-master3  192.168.1.42:6443 check
EOF

setsebool -P haproxy_connect_any=1
systemctl enable --now haproxy
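The monitor-in frontend above gives an easy health probe; a quick check, assuming you query haka-1 directly:

curl -s -o /dev/null -w '%{http_code}\n' http://192.168.1.48:33305/monitor    #expect 200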

7. Install Harbor

#Initialize the OS and install Docker
Same as above; omitted.
#Install docker-compose, which the Harbor installer depends on
wget https://github.com/docker/compose/releases/download/v2.23.1/docker-compose-linux-x86_64
chmod +x docker-compose-linux-x86_64
cp docker-compose-linux-x86_64 /usr/bin/docker-compose

#Prepare the data disk
Attach a 500 GB disk
#Check disk information
fdisk -l

#Create a partition on the new disk
fdisk /dev/sdb


#Format the partition
mkfs.xfs /dev/sdb1


#Mount it persistently
mkdir /harbor
echo '/dev/sdb1 /harbor xfs defaults 0 0' >> /etc/fstab
mount -a


#Download the Harbor offline installer
https://github.com/goharbor/harbor/releases/download/v2.9.1/harbor-offline-installer-v2.9.1.tgz

#Extract and install
tar xf harbor-offline-installer-v2.9.1.tgz
cd harbor
cp harbor.yml.tmpl harbor.yml

##Create a directory for the self-signed certificate
mkdir certs
touch /root/.rnd
cd certs/
openssl genrsa -out harbor-ca.key
openssl req -x509 -new -nodes -key harbor-ca.key -subj "/CN=192.168.1.47" -days 7120 -out harbor-ca.crt
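Note that Docker and other Go-based clients reject server certificates that lack a subjectAltName, and the -subj form above only sets the CN. A variant that embeds the IP SAN (requires OpenSSL 1.1.1+, which CentOS 8 ships):

openssl req -x509 -new -nodes -key harbor-ca.key -subj "/CN=192.168.1.47" \
  -addext "subjectAltName = IP:192.168.1.47" -days 7120 -out harbor-ca.crt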

#Edit the Harbor configuration file
vim harbor.yml
    hostname: 192.168.1.47
    certificate: /apps/harbor/certs/harbor-ca.crt    #adjust both paths to wherever the certs directory was created
    private_key: /apps/harbor/certs/harbor-ca.key
    harbor_admin_password: 123456
    data_volume: /harbor
    
#Install Harbor with the Trivy vulnerability scanner enabled
./install.sh --with-trivy


#Create the certificate directory on every node
mkdir -p /etc/docker/certs.d/192.168.1.47

#Copy the certificate to every host that needs to pull images
scp harbor-ca.crt k8s-master1:/etc/docker/certs.d/192.168.1.47
scp harbor-ca.crt k8s-master2:/etc/docker/certs.d/192.168.1.47
scp harbor-ca.crt k8s-master3:/etc/docker/certs.d/192.168.1.47
scp harbor-ca.crt k8s-node1:/etc/docker/certs.d/192.168.1.47
scp harbor-ca.crt k8s-node2:/etc/docker/certs.d/192.168.1.47
scp harbor-ca.crt k8s-node3:/etc/docker/certs.d/192.168.1.47
scp harbor-ca.crt k8s-node4:/etc/docker/certs.d/192.168.1.47

#Edit docker.service, adding the registry IP or domain
vim /etc/systemd/system/docker.service

ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --insecure-registry {IP} --insecure-registry {domain}
#Restart Docker on each node
systemctl daemon-reload && systemctl restart docker

#On the Harbor host, restart the stack with docker-compose (run inside the harbor directory)
docker-compose down -v
docker-compose up -d
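A quick end-to-end test from any node that received the certificate, using the admin password set in harbor.yml and Harbor's default public project "library":

docker login 192.168.1.47 -u admin -p 123456
docker pull busybox
docker tag busybox 192.168.1.47/library/busybox
docker push 192.168.1.47/library/busybox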

8. Deploy the Kubernetes cluster

8.1 Install kubeadm, kubectl and kubelet

#Install kubeadm, kubectl and kubelet
#List the available versions
yum list kubeadm.x86_64 --showduplicates | sort -r
yum list kubelet.x86_64 --showduplicates | sort -r
yum list kubectl.x86_64 --showduplicates | sort -r
#Install the latest version available in the repo
yum install -y kubeadm kubectl kubelet
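If the repo ever carries versions newer than the 1.28.2 this guide targets, pin the packages explicitly instead:

yum install -y kubeadm-1.28.2 kubelet-1.28.2 kubectl-1.28.2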

#To keep the kubelet's cgroup driver consistent with Docker's (systemd), edit the following file:
vim /etc/sysconfig/kubelet
	KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"

#Enable kubelet at boot; it has no configuration yet, so it will start automatically after cluster initialization
systemctl enable kubelet

8.2 Prepare the images

#kubeadm config images list --kubernetes-version=v1.28.2
----
registry.k8s.io/kube-apiserver:v1.28.2
registry.k8s.io/kube-controller-manager:v1.28.2
registry.k8s.io/kube-scheduler:v1.28.2
registry.k8s.io/kube-proxy:v1.28.2
registry.k8s.io/pause:3.9
registry.k8s.io/etcd:3.5.9-0
registry.k8s.io/coredns/coredns:v1.10.1
----

docker pull registry.aliyuncs.com/google_containers/kube-apiserver:v1.28.2
docker pull registry.aliyuncs.com/google_containers/kube-controller-manager:v1.28.2
docker pull registry.aliyuncs.com/google_containers/kube-scheduler:v1.28.2
docker pull registry.aliyuncs.com/google_containers/kube-proxy:v1.28.2
docker pull registry.aliyuncs.com/google_containers/pause:3.9
docker pull registry.aliyuncs.com/google_containers/etcd:3.5.9-0
docker pull registry.aliyuncs.com/google_containers/coredns:v1.10.1

#images_download.sh
#!/bin/bash
images_list='
kube-apiserver:v1.28.2
kube-controller-manager:v1.28.2
kube-scheduler:v1.28.2
kube-proxy:v1.28.2
pause:3.9
etcd:3.5.9-0
coredns:v1.10.1'

for i in $images_list
do
     docker pull registry.aliyuncs.com/google_containers/$i
     docker pull 192.168.1.47/google_containers/$i  #use whichever of these two pull lines fits your environment
done

#images_tag.sh
#!/bin/bash
images_list='
kube-apiserver:v1.28.2
kube-controller-manager:v1.28.2
kube-scheduler:v1.28.2
kube-proxy:v1.28.2
pause:3.9
etcd:3.5.9-0
coredns:v1.10.1'

for i in $images_list
do
     docker tag registry.aliyuncs.com/google_containers/$i 192.168.1.47/google_containers/$i
     docker push 192.168.1.47/google_containers/$i
done
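Run the two helper scripts in order on a host that can reach both the Aliyun registry and Harbor (script names as above):

bash images_download.sh
bash images_tag.sh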

8.3 Initialize the master with kubeadm

8.3.1 Common kubeadm init parameters
kubeadm init initializes a Kubernetes control plane and supports many flags. The commonly used ones:

--config <file>: path to a kubeadm configuration file.
--cri-socket <socket>: endpoint of the CRI socket to connect to, e.g. unix:///var/run/cri-dockerd.sock.
--control-plane-endpoint <ip:port>: a shared endpoint (VIP or DNS name) for all control-plane nodes; required for an HA setup like this one.
--ignore-preflight-errors <errors>: comma-separated list of preflight error keys to ignore, or 'all' to ignore everything.
--apiserver-advertise-address <ip-address>: the IP address the API server advertises; defaults to the auto-detected local address.
--apiserver-bind-port <port>: the port the API server binds to; defaults to 6443.
--apiserver-cert-extra-sans <sans>: extra Subject Alternative Names (IP addresses or DNS names) for the API server certificate.
--upload-certs: upload the control-plane certificates to a Secret in the cluster so additional control-plane nodes can fetch them when joining.
--service-cidr <cidr>: CIDR range for Service IPs; defaults to 10.96.0.0/12.
--pod-network-cidr <cidr>: CIDR range for the Pod network; keep it consistent with what the CNI plugin expects.
--service-dns-domain <domain>: the cluster DNS domain; defaults to cluster.local.
--image-repository <repo>: the registry to pull control-plane images from.
--kubernetes-version <version>: the exact control-plane version to deploy.
--feature-gates <feature-gates>: comma-separated key=value pairs enabling or disabling features.
--token <token>: the bootstrap token used when nodes join.
--token-ttl <duration>: how long the bootstrap token is valid; defaults to 24h0m0s.
8.3.2 Initialization command
kubeadm init --control-plane-endpoint=192.168.1.120:16443 --kubernetes-version=1.28.2 --pod-network-cidr=10.100.0.0/16 --service-cidr=10.200.0.0/16 --service-dns-domain=zhangluo.local --image-repository=192.168.1.47/google_containers --cri-socket unix:///var/run/cri-dockerd.sock
8.3.3 Reset command
kubeadm reset --cri-socket unix:///var/run/cri-dockerd.sock
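kubeadm reset does not clean up everything; if you re-initialize after a failed attempt, clearing the leftovers below as well is common practice (a hedged sketch, not part of kubeadm itself):

rm -rf /etc/cni/net.d $HOME/.kube/config
iptables -F && iptables -t nat -F
ipvsadm --clear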


8.3.4 Prepare the kubeconfig on the master node
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
export KUBECONFIG=/etc/kubernetes/admin.conf

8.4 Join the other master nodes

8.4.1 Sync the certificates to the other master nodes
scp -r /etc/kubernetes/pki root@k8s-master2:/etc/kubernetes/
scp -r /etc/kubernetes/pki root@k8s-master3:/etc/kubernetes/
8.4.2 Delete the host-specific certificates, otherwise the nodes cannot join
#Never delete these certificates on master1 itself
cd /etc/kubernetes/pki/
rm -rf apiserver* 
rm -rf etcd/peer.*
rm -rf etcd/server.* 
8.4.3 On the successfully initialized master, generate the certificate key for adding new master nodes
#Upload the certificates; the output is the certificate key
# kubeadm init phase upload-certs --upload-certs
c0762a6d292f1957471b10869989adbeb669c7db2a17a1f43562d7b65a3696a2
8.4.4 Join master2 and master3 to the cluster
kubeadm join 192.168.1.120:16443 --token 1m1rdw.b54x27pfec5q60gl --discovery-token-ca-cert-hash sha256:24a05572ca6cf71f13d95923b9380a6517ffb2e9f693f08e6d2bc1c493205821 --control-plane --certificate-key c0762a6d292f1957471b10869989adbeb669c7db2a17a1f43562d7b65a3696a2 --cri-socket unix:///var/run/cri-dockerd.sock


	mkdir -p $HOME/.kube
	sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
	sudo chown $(id -u):$(id -g) $HOME/.kube/config
8.4.5 Verify the master nodes were added
kubectl get node


8.5 Join the worker nodes

kubeadm join 192.168.1.120:16443 --token 1m1rdw.b54x27pfec5q60gl --discovery-token-ca-cert-hash sha256:24a05572ca6cf71f13d95923b9380a6517ffb2e9f693f08e6d2bc1c493205821  --cri-socket unix:///var/run/cri-dockerd.sock
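The bootstrap token from kubeadm init expires after 24h by default; if it has expired by the time you add a node, regenerate the full join command on master1:

kubeadm token create --print-join-command
#then append --cri-socket unix:///var/run/cri-dockerd.sock to the printed command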


9. Install the network plugin

There are many network plugins; you only need to deploy one of them. Calico is recommended.

Calico is a pure layer-3 data center networking solution with broad platform support, including Kubernetes and OpenStack.

On each compute node, Calico uses the Linux kernel to implement an efficient virtual router (vRouter) that handles data forwarding, and each vRouter advertises the routes of the workloads running on it to the rest of the Calico network over BGP.

Calico also implements the Kubernetes NetworkPolicy API, providing ACL functionality.

9.1 Download Calico

wget https://docs.tigera.io/archive/v3.25/manifests/calico.yaml

vim calico.yaml
...
#Uncomment CALICO_IPV4POOL_CIDR and keep it in sync with the --pod-network-cidr passed to kubeadm init
name: CALICO_IPV4POOL_CIDR
value: "10.100.0.0/16"
...

9.2 Prepare the images

  #On the master, list the images the manifest references
 cat calico.yaml |grep image
          image: docker.io/calico/cni:v3.25.0
          image: docker.io/calico/node:v3.25.0
          image: docker.io/calico/kube-controllers:v3.25.0

 docker pull docker.io/calico/cni:v3.25.0
 docker pull docker.io/calico/node:v3.25.0
 docker pull docker.io/calico/kube-controllers:v3.25.0

9.3 Install Calico

kubectl apply -f calico.yaml
kubectl get pod -n kube-system 
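The calico-node pods can take a few minutes to become Ready; a convenient way to wait (label k8s-app=calico-node per the manifest above):

kubectl -n kube-system wait --for=condition=Ready pod -l k8s-app=calico-node --timeout=300s
kubectl get node -o wide    #all nodes should now be Ready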

9.4 Remove the taints from the master nodes

#Check the taints
kubectl describe node k8s-master1 | grep -i taint
kubectl describe node k8s-master2 | grep -i taint
kubectl describe node k8s-master3 | grep -i taint
---

Taints:             node-role.kubernetes.io/control-plane:NoSchedule
---
#Remove the taints (only do this if you want regular workloads scheduled on the masters)
kubectl taint node k8s-master1 node-role.kubernetes.io/control-plane:NoSchedule-
kubectl taint node k8s-master2 node-role.kubernetes.io/control-plane:NoSchedule-
kubectl taint node k8s-master3 node-role.kubernetes.io/control-plane:NoSchedule-

10. Deploy metrics-server

 #Download URL
 https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.6.4/components.yaml
 
 #Edit the manifest: point the image at the Aliyun mirror and add --kubelet-insecure-tls
 vim components.yaml
 ---
        - --cert-dir=/tmp
        - --secure-port=4443
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --kubelet-use-node-status-port
        - --metric-resolution=15s
        - --kubelet-insecure-tls
        image: registry.aliyuncs.com/google_containers/metrics-server:v0.6.4
        imagePullPolicy: IfNotPresent
 ---
 
 kubectl apply -f components.yaml


kubectl top pod -n kube-system
kubectl top node


11. Deploy the dashboard

11.1 Deploy the dashboard

11.1.1 Download the manifest
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml
v2.7.0 is the latest release at the time of writing.

vim recommended.yaml
#Change the kubernetes-dashboard Service to NodePort so it is reachable from outside the cluster

----
spec:
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30001
  type: NodePort
  selector:
    k8s-app: kubernetes-dashboard
----

kubectl apply -f recommended.yaml
kubectl get pods -n kubernetes-dashboard
kubectl get pods,svc -n kubernetes-dashboard


11.1.2 Create a user
wget https://raw.githubusercontent.com/cby-chen/Kubernetes/main/yaml/dashboard-user.yaml

kubectl apply -f dashboard-user.yaml


11.1.3 Create a token
kubectl -n kubernetes-dashboard create token admin-user
eyJhbGciOiJSUzI1NiIsImtpZCI6ImlLTi04MkhvQ2Z4QklVdThHaGdsYTllUUNMRWhfRHBBUnVyNXplUjM0REEifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLnpoYW5nbHVvLmxvY2FsIl0sImV4cCI6MTcwMDQ3NDAzOSwiaWF0IjoxNzAwNDcwNDM5LCJpc3MiOiJodHRwczovL2t1YmVybmV0ZXMuZGVmYXVsdC5zdmMuemhhbmdsdW8ubG9jYWwiLCJrdWJlcm5ldGVzLmlvIjp7Im5hbWVzcGFjZSI6Imt1YmVybmV0ZXMtZGFzaGJvYXJkIiwic2VydmljZWFjY291bnQiOnsibmFtZSI6ImFkbWluLXVzZXIiLCJ1aWQiOiI5NWQxNGRkOS00MzEwLTQ0M2YtOWJiOS05NDg5N2E3MzUyMmQifX0sIm5iZiI6MTcwMDQ3MDQzOSwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmVybmV0ZXMtZGFzaGJvYXJkOmFkbWluLXVzZXIifQ.jrlpdqU96c462mI0cB2Rc9Fe4wDjjgsEOY1szDglvs-ge3-FnFb_pcf27l1wIZxQCpstNJWa1qMRWGjFV6PEZKs797U4Mv6DY48UllApRWNtYNKkISVV2Fa2Ap8pf8eIKw9NYU__ERsNR_bD9gIwryAS9bTZeIU7CvIZlVqt2111kTBe5xTTfrJrFE4VIKh2UjdumN-F_LcY_0gV8o045xnf24ATI_p3fR243lSlkbCNZxqLvn46-0Z9Yh-6FnPa2_tY5dw3S0fGo5z69itPbJLB-u3kCQhu4x7ruLP01qA_16tAsWVJDR7jqUVVmNxNn6zTxqL88FzJDarYhZyQ_A


11.2 Log in to the web UI

https://k8s-master1:30001
Enter the token generated above:
