1、环境

1.1、服务器统计

类型服务器ip备注
harbor(1台)10.10.99.26
Master(2台)10.10.99.21/22
Node(3台)10.10.99.23/24/25
Haproxy(2台)10.10.99.18/19

1.2、主机名设置

类型服务器ip主机名备注
harbor10.10.99.26harbor.ldy.cn
Master1、ansible10.10.99.21k8s-master1.ldy.cnvip:10.10.99.17
Master210.10.99.22k8s-master2.ldy.cnvip:10.10.99.17
node110.10.99.23k8s-node1.ldy.cn
node210.10.99.24k8s-node2.ldy.cn
node310.10.99.25k8s-node3.ldy.cn
haproxy110.10.99.18haproxy1.ldy.cn
haproxy210.10.99.19haproxy2.ldy.cn

1.3、软件环境

apiserver入口(VIP):10.10.99.17:6443

dashboard端口:8443

操作系统:Ubuntu18.04

k8s版本:v1.20.11

flannel:v0.13.0-amd64(calico部署失败,不清楚原因)

1.4、基础准备

1、更新/etc/sysctl.conf、limit.conf

# 发现有下面几个报错:
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-ip6tables: No such file or directory
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-arptables: No such file or directory

# 解决
modprobe br_netfilter

# 另一个报错, 内核4.14之后废弃了
sysctl: cannot stat /proc/sys/net/ipv4/tcp_tw_recycle: No such file or directory

# 解决
在 /etc/sysctl.conf 配置文件中将该条目删除

2、关闭交换分区

2、开始部署

2.1、keepalived

设置为非抢占式。

非抢占式实例的角色都是BACKUP,并且开启 nopreempt(非抢占)选项。

节点1

root@haproxy1:~# cat /etc/keepalived/keepalived.conf 
vrrp_instance VI_1 {
    state BACKUP               # role: both nodes start as BACKUP in non-preemptive mode
    nopreempt                  # non-preemptive: a recovered node does not take the VIP back
    interface ens3             # NIC the VRRP instance is bound to
    virtual_router_id 1        # virtual router id, must match on every node of this instance
    priority 100               # higher value on the preferred node (peer uses 50)
    advert_int 3               # heartbeat/advertisement interval in seconds
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        10.10.99.17 dev ens3 label ens3:1
    }
}

节点2

root@haproxy2:/etc/keepalived# cat keepalived.conf 
vrrp_instance VI_1 {
    state BACKUP               # non-preemptive setup: both nodes are BACKUP
    nopreempt                  # do not reclaim the VIP after recovering
    interface ens3             # NIC the VRRP instance is bound to
    virtual_router_id 1        # must match node1's virtual_router_id
    priority 50                # lower than node1's 100, so node1 is preferred
    advert_int 3               # heartbeat interval in seconds
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        10.10.99.17 dev ens3 label ens3:1
    }
}

2.2、haproxy

节点1

root@haproxy1:/etc/keepalived# cat /etc/haproxy/haproxy.cfg 
# HAProxy statistics page: http://<host>:9999/haproxy-status
listen stats
 mode http
 bind 0.0.0.0:9999
 stats enable
 log global
 stats uri /haproxy-status
 stats auth ldysdy:123456

# TCP pass-through for the kube-apiservers behind the keepalived VIP.
# NOTE(review): binding the VIP on the node that does not currently hold it
# requires net.ipv4.ip_nonlocal_bind=1 — confirm the sysctl is set on both nodes.
listen k8s-6443
   bind 10.10.99.17:6443
   mode tcp
   server 10.10.99.21 10.10.99.21:6443 check inter 2s fall 3 rise 5
   server 10.10.99.22 10.10.99.22:6443 check inter 2s fall 3 rise 5

节点2

root@haproxy2:/etc/haproxy# cat haproxy.cfg
# HAProxy statistics page: http://<host>:9999/haproxy-status
listen stats
 mode http
 bind 0.0.0.0:9999
 stats enable
 log global
 stats uri /haproxy-status
 stats auth ldysdy:123456

# TCP pass-through for the kube-apiservers behind the keepalived VIP.
# NOTE(review): binding the VIP on the standby node requires
# net.ipv4.ip_nonlocal_bind=1 — confirm the sysctl is set on both nodes.
listen k8s-6443
   bind 10.10.99.17:6443
   mode tcp
   server 10.10.99.21 10.10.99.21:6443 check inter 2s fall 3 rise 5
   server 10.10.99.22 10.10.99.22:6443 check inter 2s fall 3 rise 5

2.3、harbor

2.3.1、环境

docker:20.10.10

compose:2.1.1

harbor:2.2.2

参考2.2.2文档

配置文件:(只列出需要注意的地方,其他自己看着修改)

1、不使用https,将https那一块注释
2、日志保存路径自定义后,自己去创建一下,不然待会会提示一个小报错。
2.3.2、安装完

界面登录后,自己先创建一个上传镜像的仓库,然后在使用docker登录,上传镜像

2.4、安装docker(可省略)

//安装证书
curl -fsSL https://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo apt-key add -

后面略,

apt install -y docker-ce=5:19.03.13~3-0~ubuntu-bionic

root@master2:~# docker version
Client: Docker Engine - Community
 Version:           20.10.10
 API version:       1.40
 Go version:        go1.16.9
 Git commit:        b485636
 Built:             Mon Oct 25 07:42:57 2021
 OS/Arch:           linux/amd64
 Context:           default
 Experimental:      true

Server: Docker Engine - Community
 Engine:
  Version:          19.03.13
  API version:      1.40 (minimum version 1.12)
  Go version:       go1.13.15
  Git commit:       4484c46d9d
  Built:            Wed Sep 16 17:01:06 2020
  OS/Arch:          linux/amd64
  Experimental:     false
 containerd:
  Version:          1.4.11
  GitCommit:        5b46e404f6b9f661a205e28d59c982d3634148f8
 runc:
  Version:          1.0.2
  GitCommit:        v1.0.2-0-g52b36a2
 docker-init:
  Version:          0.18.0
  GitCommit:        fec3683

2.5、密钥对

root@master1:~# apt install -y sshpass

root@master1:~# ssh-keygen

root@master1:~# cat rsa_scp.sh 
#!/bin/bash
# Push the local SSH public key to every cluster node so later ansible/scp
# runs are passwordless.  Prerequisites: sshpass installed, ssh-keygen run.
# NOTE(review): plaintext password in the script — consider reading it from
# an environment variable or a file instead.
IP="
10.10.99.21
10.10.99.22
10.10.99.23
10.10.99.24
10.10.99.25
"
for node in ${IP};do    # intentionally unquoted: word-split into one IP per iteration
 # ssh options must come BEFORE the host argument;
 # StrictHostKeyChecking=no skips the interactive host-key prompt on first connect.
 if sshpass -p 123456 ssh-copy-id -o StrictHostKeyChecking=no "${node}";then
 echo "${node} 秘钥copy完成"
 else
 echo "${node} 秘钥copy失败"
 fi
done

2.6、ansible

k8s集群各节点

root@master1:~# apt-get install python2.7

root@master1:~# ln -s /usr/bin/python2.7 /usr/bin/python

ansible节点

root@master1:~# curl -O https://bootstrap.pypa.io/pip/2.7/get-pip.py

root@master1:~# python get-pip.py

root@master1:~# python -m pip install --upgrade "pip < 21.0"

root@master1:~# pip install ansible -i https://mirrors.aliyun.com/pypi/simple/

3、部署k8s

3.1、下载kubeasz项目

# 下载工具脚本ezdown
export release=3.1.1  # 选择一个最新的版本就行,当前最新版本为3.1.1
curl -C- -fLO --retry 3 https://github.com/easzlab/kubeasz/releases/download/${release}/ezdown
chmod +x ./ezdown

root@master1:~# vim ezdown  # 将里面的k8s版本换成已安装docker匹配的版本。
K8S_BIN_VER=v1.20.11

# 使用工具脚本下载,脚本运行成功后,所有文件(kubeasz代码、二进制、离线镜像)均已整理好放入目录/etc/kubeasz
./ezdown -D

#创建集群配置实例
root@master1:~# cd /etc/kubeasz/
root@master1:/etc/kubeasz# ./ezctl new k8s-01

3.2、规划配置文件

1、hosts文件

[etcd]
[kube_master]
[kube_node]
[ex_lb]                         # external load balancer

CLUSTER_NETWORK="calico"        # network plugin type
SERVICE_CIDR="172.30.0.0/16"    # service address range
CLUSTER_CIDR="172.31.0.0/16"    # pod address range
NODE_PORT_RANGE="30000-60000"   # NodePort range
CLUSTER_DNS_DOMAIN="ldy.local"
bin_dir="/usr/bin"

2、

INSECURE_REG: '["harbor.ldy.cn"]'   # passed to docker as the insecure-registry list
MAX_PODS: 300

# pause image pulled from the private harbor registry
# NOTE(review): the image tag looks missing — confirm the intended version.
SANDBOX_IMAGE: "harbor.ldy.cn/ldy/pause-amd64"

dns_install: "no"                   # CoreDNS is installed manually later (section 3.4)
ENABLE_LOCAL_DNS_CACHE: false

metricsserver_install: "no"

ingress_install: "no"

3.3、开始部署

01.prepare.yml

- hosts:
  - kube_master
  - kube_node
  - etcd
ezctl setup k8s-01 01

02.etcd.yml

# Probe the health of every etcd member over its TLS client endpoint.
export NODE_IPS="10.10.99.23 10.10.99.24 10.10.99.25"

for member in ${NODE_IPS}; do    # intentionally unquoted: split the list on spaces
  ETCDCTL_API=3 etcdctl \
    --endpoints="https://${member}:2379" \
    --cacert=/etc/kubernetes/ssl/ca.pem \
    --cert=/etc/kubernetes/ssl/etcd.pem \
    --key=/etc/kubernetes/ssl/etcd-key.pem \
    endpoint health
done

06.network.yml
flannel省略

calico

root@master1:/etc/kubeasz/roles# grep image /etc/kubeasz/roles/calico/templates/calico-v3.19.yaml.j2|awk '{print $NF}'
docker.io/calico/cni:v3.19.2
docker.io/calico/pod2daemon-flexvol:v3.19.2
docker.io/calico/node:v3.19.2
docker.io/calico/kube-controllers:v3.19.2
# ezctl setup k8s-01 01
# ezctl setup k8s-01 02
# ezctl setup k8s-01 03
# ezctl setup k8s-01 04
# ezctl setup k8s-01 05
# ezctl setup k8s-01 06

3.4、插件安装

3.4.1、coredns两种方法思路

法1、点击这里进入官方部署方法
法2、通过kubernetes获取yaml文件部署
我的版本:1.8.6

3.5、dashboard

去官方找对应k8s版本,然后下载yaml文件,修改一些东西:
①、镜像
②、向宿主机暴露service端口号
我的版本:2.4.0

创建admin用户

# Bind the 'admin' ServiceAccount in kube-system to the built-in
# cluster-admin ClusterRole, so its token can log in to the dashboard
# with full cluster privileges.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: admin
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
roleRef:
  kind: ClusterRole
  name: cluster-admin            # pre-defined super-user role
  apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
  name: admin
  namespace: kube-system
---
# The ServiceAccount whose secret token is used for dashboard login.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
kubectl create -f admin.yaml
Logo

K8S/Kubernetes社区为您提供最前沿的新闻资讯和知识内容

更多推荐