Original article: https://blog.csdn.net/qadlr/article/details/139680236

Host configuration

Set the hostname and populate /etc/hosts so the nodes can reach each other by name

hostnamectl set-hostname master01

echo -e "\n172.21.43.81 master01\n172.21.43.12 master02\n172.21.43.98 master03\n172.21.43.180 node01\n172.21.43.15 node02\n172.21.43.127 node03\n172.21.43.195 node04\n172.21.43.205 node05\n172.21.43.28 node06" >> /etc/hosts
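# quick sanity check (assuming all hosts are already up) that every entry resolves and is reachable:
for h in master01 master02 master03 node01 node02 node03 node04 node05 node06; do ping -c 1 -W 1 $h >/dev/null && echo "$h ok" || echo "$h unreachable"; done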

Mount the data disk on the containerd data directory (step specific to the Lingang VMs)

parted /dev/vdb mklabel gpt
parted -a opt /dev/vdb mkpart primary ext4 0% 100%
mkdir -p /var/lib/containerd
chmod 777 /var/lib/containerd
# wait a few seconds for the new partition to appear
mkfs.ext4 /dev/vdb1
UUID=$(blkid -s UUID -o value /dev/vdb1)
mount /dev/vdb1  /var/lib/containerd
# configure automatic mounting at boot
echo "UUID=${UUID} /var/lib/containerd ext4 defaults 0 2" >> /etc/fstab

Configure passwordless SSH

# run on master01; keep pressing Enter to accept the defaults
ssh-keygen
# copy the locally generated public key to each remote host: ssh-copy-id root@<remote host IP>
ssh-copy-id master02
ssh-copy-id node01
...
# verify
ssh node01
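# instead of running ssh-copy-id host by host, a loop over all hosts from /etc/hosts also works (illustrative; the root password is still entered interactively for each host):
for h in master02 master03 node01 node02 node03 node04 node05 node06; do ssh-copy-id root@$h; done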

IPVS configuration

# load the kernel modules required so that bridged IPv4 traffic can be passed to the iptables chains
cat << EOF | tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
# load them now
modprobe overlay
modprobe br_netfilter
# verify
lsmod | grep -e overlay -e br_netfilter

cat << EOF | tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
sysctl -p /etc/sysctl.d/k8s.conf
# verify
sysctl -a | grep ip_forward
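# the bridge parameters can be checked the same way once br_netfilter is loaded:
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables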

apt update
apt install ipset ipvsadm
cat << EOF | tee /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF

cat  << EOF | tee ipvs.sh
#!/bin/sh
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
bash ipvs.sh
# verify
lsmod | grep ip_vs

Disable swap

free -m
# temporary (until reboot):
swapoff -a
# permanent (takes effect after a reboot; to disable immediately as well, also run the swapoff command above):
vim /etc/fstab
# comment out the line containing the swap keyword, e.g.:
/swap.img       none    swap    sw      0       0
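
# alternatively, the swap line can be commented out non-interactively (a sketch; check the file afterwards):
sed -ri 's/^([^#].*\sswap\s.*)$/#\1/' /etc/fstab
grep swap /etc/fstab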

Install software dependencies

Install containerd

Go to https://github.com/containerd/containerd/releases to check the latest version, change the version number (1.7.17) in the line below, and download it (the file downloaded is cri-containerd-XXX-linux-amd64.tar.gz):

wget https://github.com/containerd/containerd/releases/download/v1.7.17/cri-containerd-1.7.17-linux-amd64.tar.gz
# extract:
tar xf cri-containerd-1.7.17-linux-amd64.tar.gz -C /
# verify:
which containerd && which runc && containerd --version && runc --version
## startup configuration
mkdir /etc/containerd
containerd config default > /etc/containerd/config.toml
vim /etc/containerd/config.toml
Change the sandbox_image line to sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9"
Change SystemdCgroup from false to true
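# alternatively, the same two edits can be made non-interactively with sed (a sketch against the default config.toml; verify before starting containerd):
sed -i 's#sandbox_image = .*#sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9"#' /etc/containerd/config.toml
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
grep -E 'sandbox_image|SystemdCgroup' /etc/containerd/config.toml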
systemctl enable --now containerd
## check status
systemctl status containerd
# import images (a sketch of import.sh is given after this block)
sh ./import.sh
# a "device or resource busy: unknown" error may appear while the script runs; after it finishes, run the umount command below to unmount manually.
# it is best to double-check that the images were actually imported.
umount /var/lib/containerd/tmpmounts/containerd-mount*
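
import.sh itself is not included in this post; a minimal sketch of what such an image import script usually looks like, assuming the offline images were saved as .tar files in the current directory (the file layout is hypothetical):

#!/bin/sh
# import every image tarball into the k8s.io namespace used by the kubelet
for f in ./*.tar; do
    ctr -n k8s.io images import "$f"
done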

Install the Kubernetes components

curl -fsSL https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.30/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.30/deb/ /" | tee /etc/apt/sources.list.d/kubernets.list
apt-get update
# check the available versions
apt-cache policy kubeadm
# install
apt-get install -y kubelet=1.30.0-1.1 kubeadm=1.30.0-1.1 kubectl=1.30.0-1.1
# configure kubelet (on deb-based systems the kubelet service reads /etc/default/kubelet rather than /etc/sysconfig/kubelet)
vim /etc/default/kubelet
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"
# just enable kubelet at boot; since no config file has been generated yet, it will start automatically after cluster initialization
systemctl enable kubelet
# pin the versions so apt does not upgrade them automatically
apt-mark hold kubelet kubeadm kubectl
# unpin
apt-mark unhold kubelet kubeadm kubectl
# list the images that need to be prepared
kubeadm config images list
# pull the images in advance
kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers
# verify
crictl images
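# if crictl warns that no runtime endpoint is configured, point it at the containerd socket first (a common adjustment, not part of the original steps):
crictl config --set runtime-endpoint=unix:///run/containerd/containerd.sock
crictl config --set image-endpoint=unix:///run/containerd/containerd.sock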

High availability for the k8s apiserver with keepalived + nginx (run only on the master nodes)

apt-get update
apt-get install -y keepalived nginx-extras
# nginx
cp /etc/nginx/nginx.conf /etc/nginx/nginx.conf.bak
vim /etc/nginx/nginx.conf # write in the nginx.conf file given below
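# optionally validate the new configuration before enabling the service (not part of the original steps):
nginx -t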
systemctl enable nginx --now
# check
netstat -ano | grep LISTEN # if port 16443 is not being listened on, restart nginx
systemctl stop nginx
systemctl start nginx
# keepalived
cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak
vim /etc/keepalived/keepalived.conf # write in the keepalived.conf file given below
vim /etc/keepalived/nginx_chk.sh # write in the nginx_chk.sh script given below
sudo chmod +x /etc/keepalived/nginx_chk.sh
systemctl enable keepalived --now
## ip addr should now show the virtual VIP address 172.21.43.100/32
ping 172.21.43.100
systemctl stop keepalived
# the VIP floats to the node with priority 90; once the keepalived instance with priority 100 comes back up, it floats back
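# a concrete way to check which node currently holds the VIP (illustrative; the interface name follows keepalived.conf below):
ip addr show eth0 | grep 172.21.43.100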
nginx.conf
load_module /usr/lib/nginx/modules/ngx_stream_module.so;

user root;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
include /usr/share/nginx/modules/*.conf;
events {
    worker_connections 1024;
}
 
stream {
 log_format main '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
 access_log /var/log/nginx/k8s-access.log main;
 upstream k8s-apiserver {
   server 172.21.43.81:6443;
   server 172.21.43.12:6443;
   server 172.21.43.98:6443;
 
 }
 server {
  listen 16443;  # since nginx is co-located on the master nodes, this listen port cannot be 6443, otherwise it would conflict with the apiserver
  proxy_pass k8s-apiserver;
 }
}
 
http {
    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';
    access_log  /var/log/nginx/access.log  main;
    sendfile            on;
    tcp_nopush          on;
    tcp_nodelay         on;
    keepalive_timeout   65;
    types_hash_max_size 4096;
    include             /etc/nginx/mime.types;
    default_type        application/octet-stream;
   #include /etc/nginx/conf.d/*.conf;
    server {
        listen       80 default_server;
        server_name  _;
        location = / {
        }
    }
}
keepalived.conf
global_defs {
   router_id 172.21.43.98  ## set to this node's IP
}
vrrp_script chk_nginx {
    script "/etc/keepalived/nginx_chk.sh"  
    interval 2  
}
vrrp_instance VI_1 {
    state MASTER
    interface eth0 ## name of the NIC that carries the node IP; check with ip addr
    virtual_router_id 100
    priority 90 # priority: set 100 on the master, 90, 80, ... on the backups
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1369
    }
    virtual_ipaddress {
        172.21.43.100 ## virtual VIP address
    }
    track_script {   
        chk_nginx
    }
}
nginx_chk.sh
#!/bin/bash
# check whether any nginx processes are running
A=`ps -C nginx --no-header |wc -l`
# if there are none
if [ $A -eq 0 ];then
    # restart nginx and wait 2 seconds
    service nginx restart
    sleep 2  
    # check again for nginx processes
    if [ `ps -C nginx --no-header |wc -l` -eq 0 ];then  
        # still no nginx processes: kill the local keepalived so the VIP fails over to the backup node
        killall keepalived  
    fi  
fi

Cluster initialization

Single master node

# generate the init configuration file
kubeadm config print init-defaults > kubeadm-config.yaml
vim kubeadm-config.yaml
1. Change advertiseAddress to the node's IP
2. Change name under nodeRegistration to the node's hostname (master01 in this setup)
3. Change the image repository to Aliyun's: imageRepository: registry.aliyuncs.com/google_containers
4. Add a line below serviceSubnet: podSubnet: 10.244.0.0/16
5. Append the following (not needed if the kubelet was already configured with KUBELET_EXTRA_ARGS="--cgroup-driver=systemd" earlier):
---
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
cgroupDriver: systemd
6. Append the following to enable ipvs mode:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind:  KubeProxyConfiguration
mode: ipvs                      ## enable ipvs
# run init
kubeadm init --config kubeadm-config.yaml
# the console output lists the follow-up steps to run
1. Configure kubectl
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
2. Join
kubeadm join 10.2.1.15:6443 --token abcdef --discovery-token-ca-cert-hash sha256:8584c456d27f
# if it fails, run kubeadm reset -f to restore the node to a state without Kubernetes installed
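# once init has finished and kubectl is configured, a quick health check (illustrative):
kubectl get nodes
kubectl get pods -n kube-system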

Multiple master nodes

vim kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
kubernetesVersion: 1.30.0
imageRepository: registry.aliyuncs.com/google_containers    ## use the domestic mirror
controlPlaneEndpoint: 172.21.43.100:16443                ## virtual VIP address + port
apiServer:
 certSANs:
 - 172.21.43.100               ### add the virtual VIP address
networking:
 podSubnet: 10.244.0.0/16
 serviceSubnet: 10.96.0.0/12
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind:  KubeProxyConfiguration
mode: ipvs                     ## enable ipvs
# run init
kubeadm init --config kubeadm-config.yaml
# the console output lists the follow-up steps to run
1. Configure kubectl
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# this step is printed on every master node's console and must be run on each of them
2. Join
kubeadm join 10.2.1.15:6443 --token abcdef --discovery-token-ca-cert-hash sha256:8584c456d27f
# a master node's join command must additionally end with --control-plane, e.g.:
kubeadm join 10.2.1.15:6443 --token abcdef --discovery-token-ca-cert-hash sha256:8584c456d27f --control-plane 
# if it fails, run kubeadm reset -f to restore the node to a state without Kubernetes installed
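# if the bootstrap token has expired by the time the other nodes join, a fresh join command can be generated on master01 (standard kubeadm usage, not shown in the original post):
kubeadm token create --print-join-command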

Copy files (before running join)

mkdir -p /etc/kubernetes/pki/etcd # not needed on master01
# copy from master01 to the other master nodes
scp /etc/kubernetes/pki/ca.* master02:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/sa.* master02:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/front-proxy-ca.* master02:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/etcd/ca.* master02:/etc/kubernetes/pki/etcd/
scp /etc/kubernetes/admin.conf master02:/etc/kubernetes/
# copy from master01 to the worker nodes
scp /etc/kubernetes/admin.conf node01:/etc/kubernetes/
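# since the same files go to every additional master, a small loop saves retyping (illustrative; assumes master02/master03 already created /etc/kubernetes/pki/etcd as above):
for h in master02 master03; do
    scp /etc/kubernetes/pki/ca.* /etc/kubernetes/pki/sa.* /etc/kubernetes/pki/front-proxy-ca.* $h:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/etcd/ca.* $h:/etc/kubernetes/pki/etcd/
    scp /etc/kubernetes/admin.conf $h:/etc/kubernetes/
done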

Install the cluster network CNI: Calico

File source

https://docs.tigera.io/calico/latest/getting-started/kubernetes/quickstart

# remove the control-plane taint
kubectl taint nodes --all node-role.kubernetes.io/control-plane-
# find the "Install Calico" section and install the CRD resources, as in the line below:
# if the environment cannot reach GitHub, download the files locally and upload them; the repo location is below (open the file to download, the download button is in the top-right corner)
# https://github.com/projectcalico/calico/tree/v3.28.0/manifests
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.28.0/manifests/tigera-operator.yaml
wget https://raw.githubusercontent.com/projectcalico/calico/v3.28.0/manifests/custom-resources.yaml
vim custom-resources.yaml # change the pod network cidr line to the 10.244.0.0/16 set earlier
kubectl create -f custom-resources.yaml
# add the taint back on every master node
kubectl taint nodes master01 node-role.kubernetes.io/control-plane:NoSchedule
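# confirm Calico comes up and the nodes turn Ready (illustrative check):
kubectl get pods -n calico-system
kubectl get nodes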

Install the Kubernetes Dashboard

https://github.com/kubernetes/dashboard/tree/web/v1.4.0

# Add kubernetes-dashboard repository
helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/
# Deploy a Helm Release named "kubernetes-dashboard" using the kubernetes-dashboard chart
helm upgrade --install kubernetes-dashboard kubernetes-dashboard/kubernetes-dashboard --create-namespace --namespace kubernetes-dashboard
# access configuration
kubectl patch svc kubernetes-dashboard-kong-proxy -n kubernetes-dashboard -p '{"spec":{"type":"NodePort"}}'
# look up the NodePort created for the svc (the NodePort generated for port: 443)
kubectl get svc -nkubernetes-dashboard kubernetes-dashboard-kong-proxy -oyaml
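# the assigned NodePort can also be read directly with jsonpath (illustrative; assumes the first port entry is the 443 proxy port); the dashboard is then reachable at https://<any-node-ip>:<nodeport>
kubectl get svc -n kubernetes-dashboard kubernetes-dashboard-kong-proxy -o jsonpath='{.spec.ports[0].nodePort}'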
# for logging in, see https://github.com/kubernetes/dashboard/blob/web/v1.4.0/docs/user/access-control/creating-sample-user.md
vim admin-user.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/service-account.name: "admin-user"   
type: kubernetes.io/service-account-token
---
kubectl create -f admin-user.yaml
# After the Secret is created, run the following command to get the token saved in it:
kubectl get secret admin-user -n kubernetes-dashboard -o jsonpath="{.data.token}" | base64 -d

