Kubernetes

[Cluster topology diagram]

Kubernetes Installation

Host Inventory

Hostname	IP address	Minimum spec
master	192.168.1.50	2 CPUs, 4 GB RAM
node-0001	192.168.1.51	2 CPUs, 4 GB RAM
node-0002	192.168.1.52	2 CPUs, 4 GB RAM
node-0003	192.168.1.53	2 CPUs, 4 GB RAM
node-0004	192.168.1.54	2 CPUs, 4 GB RAM
node-0005	192.168.1.55	2 CPUs, 4 GB RAM
harbor	192.168.1.30	2 CPUs, 4 GB RAM

Install the Control-Plane Node

1. Configure the package repository

[root@ecs-proxy s4]# rsync -av docker/ /var/localrepo/docker/
[root@ecs-proxy s4]# rsync -av kubernetes/packages/ /var/localrepo/k8s/
[root@ecs-proxy s4]# createrepo --update /var/localrepo/
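For the hosts to consume this repository, each needs a matching repo file; a minimal sketch, assuming /var/localrepo is exported over HTTP (the repo id, name, and URL below are assumptions, adjust them to your environment):

[root@master ~]# cat >/etc/yum.repos.d/local.repo<<EOF
# hypothetical sketch, not from the course material
[local]
name=Local Repository
baseurl=http://<repo-server>/localrepo
enabled=1
gpgcheck=0
EOF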

2. Configure the system environment

# Disable firewalld and swap
[root@master ~]# sed '/swap/d' -i /etc/fstab
[root@master ~]# swapoff -a
[root@master ~]# dnf remove -y firewalld-*
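A quick check that swap is really disabled; swapon prints nothing when no swap device is active:

[root@master ~]# swapon --show
[root@master ~]# free -h | grep -i swap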

3. Install the packages

[root@master ~]# vim /etc/hosts
192.168.1.30    harbor
192.168.1.50    master
192.168.1.51    node-0001
192.168.1.52    node-0002
192.168.1.53    node-0003
192.168.1.54    node-0004
192.168.1.55    node-0005
[root@master ~]# dnf install -y kubeadm kubelet kubectl containerd.io ipvsadm ipset iproute-tc

All of these packages are used to build a Kubernetes cluster on Linux. kubeadm, kubelet, and kubectl are the core Kubernetes components; containerd.io provides the container runtime; ipvsadm, ipset, and iproute-tc are kernel networking tools that kube-proxy's IPVS mode depends on.

kubeadm: the command used to bootstrap and initialize a Kubernetes cluster.

kubelet: the agent that runs on every node in the cluster and starts Pods and their containers.

kubectl: the command-line tool for communicating with a Kubernetes cluster.

containerd.io: an open-source container runtime that provides a lightweight environment for running containers; here it serves as the CRI runtime for Kubernetes.

ipvsadm: the IP Virtual Server administration tool, used to configure IPv4/IPv6 load balancing in the kernel; kube-proxy's IPVS mode is built on it.

ipset: a tool for managing kernel IP sets, which are used for efficient IP address filtering.
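After the install, the component versions can be checked before going further; for example:

[root@master ~]# kubeadm version -o short
[root@master ~]# kubelet --version
[root@master ~]# kubectl version --client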

[root@master ~]# containerd config default >/etc/containerd/config.toml
[root@master ~]# vim /etc/containerd/config.toml
61:     sandbox_image = "harbor:443/k8s/pause:3.9"
125:    SystemdCgroup = true
insert at line 154:
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
          endpoint = ["https://harbor:443"]
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."harbor:443"]
          endpoint = ["https://harbor:443"]
        [plugins."io.containerd.grpc.v1.cri".registry.configs."harbor:443".tls]
          insecure_skip_verify = true
[root@master ~]# systemctl enable --now kubelet containerd
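Note that kubelet will keep restarting until kubeadm init provides its configuration; that is expected at this point. The containerd side can be verified like this (containerd config dump prints the merged runtime configuration):

[root@master ~]# systemctl is-active containerd
[root@master ~]# containerd config dump | grep -E 'SystemdCgroup|sandbox_image'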

4. Configure kernel modules and parameters

# Load kernel modules
[root@master ~]# cat >/etc/modules-load.d/containerd.conf<<EOF
overlay
br_netfilter
xt_conntrack
EOF
[root@master ~]# systemctl restart systemd-modules-load.service
# Set kernel parameters
[root@master ~]# cat >/etc/sysctl.d/99-kubernetes-cri.conf<<EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.netfilter.nf_conntrack_max = 1000000
EOF
[root@master ~]# sysctl -p /etc/sysctl.d/99-kubernetes-cri.conf
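A quick verification that the modules are loaded and the parameters took effect:

[root@master ~]# lsmod | grep -E 'overlay|br_netfilter|xt_conntrack'
[root@master ~]# sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables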

5. Import the K8s images

# Copy this stage's kubernetes/init directory to master
[root@ecs-proxy ~]# rsync -av kubernetes/init 192.168.1.50:./
[root@master ~]# dnf install -y docker-ce
[root@master ~]# mkdir -p /etc/docker
[root@master ~]# vim /etc/docker/daemon.json 
{
    "registry-mirrors":["https://harbor"],
    "insecure-registries":["harbor:443"]
}
[root@master ~]# systemctl enable --now docker
# Log in to the harbor registry and push the images
[root@master ~]# docker login harbor:443 
Username: admin
Password: ********
Login Succeeded
[root@master ~]# docker load -i init/v1.26.0.tar.xz
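# Retag every local image into the harbor:443/k8s/ project, push it, then delete the local tags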
[root@master ~]# docker images|while read i t _;do
    [[ "${t}" == "TAG" ]] && continue
    [[ "${i}" =~ ^"harbor:443/".+ ]] && continue
    docker tag ${i}:${t} harbor:443/k8s/${i##*/}:${t}
    docker push harbor:443/k8s/${i##*/}:${t}
    docker rmi ${i}:${t} harbor:443/k8s/${i##*/}:${t}
done
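A simple way to confirm the push worked is to pull one image back; pause:3.9 is the image referenced in config.toml above:

[root@master ~]# docker pull harbor:443/k8s/pause:3.9
[root@master ~]# docker rmi harbor:443/k8s/pause:3.9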

6. Set up tab completion

[root@master ~]# source <(kubeadm completion bash|tee /etc/bash_completion.d/kubeadm)
[root@master ~]# source <(kubectl completion bash|tee /etc/bash_completion.d/kubectl)

7. Install the master

# Dry-run to test the system environment
[root@master ~]# kubeadm init --config=init/init.yaml --dry-run 2>error.log
[root@master ~]# cat error.log
[root@master ~]# rm -rf error.log /etc/kubernetes/tmp
# Initialize the control-plane node
[root@master ~]# kubeadm init --config=init/init.yaml |tee init/init.log
# Grant admin access
[root@master ~]# mkdir -p $HOME/.kube
[root@master ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Verify the installation
[root@master ~]# kubectl get nodes
NAME     STATUS     ROLES           AGE   VERSION
master   NotReady   control-plane   19s   v1.26.0
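The contents of init/init.yaml come with the course material and are not shown here. As a rough sketch only, a kubeadm config for this setup would typically pull images from the local harbor, use the systemd cgroup driver, and switch kube-proxy to IPVS mode; every value below is an assumption, not the actual course file:

# hypothetical sketch of init/init.yaml, not the actual course file
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.1.50
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
kubernetesVersion: v1.26.0
imageRepository: harbor:443/k8s
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs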

Install the Network Plugin

# Copy this stage's kubernetes/plugins directory to master
[root@ecs-proxy ~]# rsync -av kubernetes/plugins 192.168.1.50:./

Push the images

[root@master ~]# cd plugins/calico
[root@master calico]# docker load -i calico.tar.xz
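# Same retag-and-push loop as in step 5, but into the harbor:443/plugins/ project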
[root@master calico]# docker images|while read i t _;do
    [[ "${t}" == "TAG" ]] && continue
    [[ "${i}" =~ ^"harbor:443/".+ ]] && continue
    docker tag ${i}:${t} harbor:443/plugins/${i##*/}:${t}
    docker push harbor:443/plugins/${i##*/}:${t}
    docker rmi ${i}:${t} harbor:443/plugins/${i##*/}:${t}
done

Install Calico

[root@master calico]# sed -ri 's,^(\s*image: )(.*/)?(.+),\1harbor:443/plugins/\3,' calico.yaml
4443:  image: docker.io/calico/cni:v3.25.0
4471:  image: docker.io/calico/cni:v3.25.0
4514:  image: docker.io/calico/node:v3.25.0
4540:  image: docker.io/calico/node:v3.25.0
4757:  image: docker.io/calico/kube-controllers:v3.25.0
[root@master calico]# kubectl apply -f calico.yaml
[root@master calico]# kubectl get nodes
NAME     STATUS   ROLES           AGE   VERSION
master   Ready    control-plane   23m   v1.26.0
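The node goes Ready once the calico pods are running; they can be watched with label selectors (the k8s-app labels below come from the stock calico.yaml manifest):

[root@master calico]# kubectl -n kube-system get pods -l k8s-app=calico-node
[root@master calico]# kubectl -n kube-system get pods -l k8s-app=calico-kube-controllers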

Install the Worker Nodes

1. Obtain credentials

# List tokens
[root@master ~]# kubeadm token list
TOKEN                     TTL         EXPIRES                
abcdef.0123456789abcdef   23h         2022-04-12T14:04:34Z
# Delete a token
[root@master ~]# kubeadm token delete abcdef.0123456789abcdef
bootstrap token "abcdef" deleted
# Create a token
[root@master ~]# kubeadm token create --ttl=0 --print-join-command
kubeadm join 192.168.1.50:6443 --token fhf6gk.bhhvsofvd672yd41 --discovery-token-ca-cert-hash sha256:ea07de5929dab8701c1bddc347155fe51c3fb6efd2ce8a4177f6dc03d5793467
# Get the token CA cert hash. It can be obtained three ways:
# 1) from the install log  2) printed when the token is created  3) computed with openssl
[root@master ~]# openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt |openssl rsa -pubin -outform der |openssl dgst -sha256 -hex
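The openssl output carries a prefix such as (stdin)= , while kubeadm join expects the bare hex digest prefixed with sha256:. A small sketch that prints it in the right form (the exact prefix varies with the openssl version, hence awk taking the last field):

[root@master ~]# openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt \
    | openssl rsa -pubin -outform der 2>/dev/null \
    | openssl dgst -sha256 -hex | awk '{print "sha256:"$NF}'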

2. Install a node

# On every node, first repeat control-plane installation steps 2, 3 and 4
# (system environment, package installation, kernel modules and parameters).

[root@node ~]# kubeadm join 192.168.1.50:6443 --token fhf6gk.bhhvsofvd672yd41 --discovery-token-ca-cert-hash sha256:ea07de5929dab8701c1bddc347155fe51c3fb6efd2ce8a4177f6dc03d5793467
#------------------------ Verify on the master node ---------------------------
[root@master ~]# kubectl get nodes
NAME        STATUS   ROLES           AGE   VERSION
master      Ready    control-plane   76m   v1.26.0
node-0001   Ready    <none>          61s   v1.26.0

3. Batch deployment

  • Copy kubernetes/nodejoin to the jump host
[root@ecs-proxy s4]# cp -a kubernetes/nodejoin /root/
[root@ecs-proxy s4]# cd ~root/nodejoin/
[root@ecs-proxy nodejoin]# vim nodeinit.yaml
... ...
  vars:
    master: '192.168.1.50:6443'
    token: 'replace this with your own token'
    token_hash: 'sha256:replace this with your own token CA hash'
... ...
[root@ecs-proxy nodejoin]# ansible-playbook nodeinit.yaml
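Before running the playbook against all nodes, it can be sanity-checked with ansible-playbook's built-in flags:

[root@ecs-proxy nodejoin]# ansible-playbook nodeinit.yaml --syntax-check
[root@ecs-proxy nodejoin]# ansible-playbook nodeinit.yaml --list-hosts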

Check the Cluster Status

# Verify node status
[root@master ~]# kubectl get nodes
NAME        STATUS   ROLES           AGE   VERSION
master      Ready    control-plane   99m   v1.26.0
node-0001   Ready    <none>          23m   v1.26.0
node-0002   Ready    <none>          57s   v1.26.0
node-0003   Ready    <none>          57s   v1.26.0
node-0004   Ready    <none>          57s   v1.26.0
node-0005   Ready    <none>          57s   v1.26.0

# Verify pod status
[root@master ~]# kubectl -n kube-system get pods
NAME                                      READY   STATUS    RESTARTS   AGE
calico-kube-controllers-fc945b5f7-p4xnj   1/1     Running   0          77m
calico-node-6s8k2                         1/1     Running   0          59s
calico-node-bxwdd                         1/1     Running   0          59s
calico-node-d5g6x                         1/1     Running   0          77m
calico-node-lfwdh                         1/1     Running   0          59s
calico-node-qnhxr                         1/1     Running   0          59s
calico-node-sjngw                         1/1     Running   0          24m
coredns-844c6bb88b-89lzt                  1/1     Running   0          59m
coredns-844c6bb88b-qpbvk                  1/1     Running   0          59m
etcd-master                               1/1     Running   0          70m
kube-apiserver-master                     1/1     Running   0          70m
kube-controller-manager-master            1/1     Running   0          70m
kube-proxy-5xjzw                          1/1     Running   0          59s
kube-proxy-9mbh5                          1/1     Running   0          59s
kube-proxy-g2pmp                          1/1     Running   0          99m
kube-proxy-l7lpk                          1/1     Running   0          24m
kube-proxy-m6wfj                          1/1     Running   0          59s
kube-proxy-vqtt8                          1/1     Running   0          59s
kube-scheduler-master                     1/1     Running   0          70m
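Since ipvsadm was installed for kube-proxy's IPVS mode, the kernel virtual-server table can also be inspected; this is only meaningful if kube-proxy was actually configured for IPVS, otherwise the table is empty:

[root@master ~]# ipvsadm -Ln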
