Contents

1. Versions

2. Hostnames

3. System configuration (run on Master and all Node machines)

4. Passwordless SSH (run on the Master node)

5. Install Docker (run on Master and all Node machines)

6. Install Kubernetes (run on Master and all Node machines)

7. Initialize the cluster on the Master node

8. Join the Worker nodes to the cluster

9. Install the calico network plugin

10. Check the Pods


1. Versions

Docker: 20.10.11
kubernetes: v1.19.0
calico: v3.21.1

2. Hostnames

cat /etc/hosts
192.168.1.161  master
192.168.1.162  node01
192.168.1.163  node02
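
If the entries are not present yet, the same block can be appended on every host, and each machine's own name set to match (standard commands; values are the ones from the table above):

cat >> /etc/hosts << EOF
192.168.1.161  master
192.168.1.162  node01
192.168.1.163  node02
EOF
hostnamectl set-hostname master   # use node01 / node02 on the respective machines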

3. System configuration (run on Master and all Node machines)

# Disable SELinux (takes effect after the reboot at the end of this section), then verify:
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
cat /etc/selinux/config | grep SELINUX=disabled
SELINUX=disabled

# Stop and disable the firewall
systemctl stop firewalld && systemctl disable firewalld
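
If the firewall has to stay on, the required ports can be opened instead of disabling firewalld outright. A minimal sketch (port list per the upstream kubeadm requirements for v1.19, plus 179/tcp for calico BGP; worker nodes need fewer of these):

firewall-cmd --permanent --add-port=6443/tcp --add-port=2379-2380/tcp --add-port=10250-10252/tcp --add-port=179/tcp
firewall-cmd --reload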

# Turn off the swap partition (swapoff for the running system; the sed below persists it across reboots)
swapoff -a && free -h
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab

# Kernel parameters (the bridge-nf settings require the br_netfilter module)
modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf   # also load it on boot
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_nonlocal_bind = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
vm.overcommit_memory = 1
EOF
sysctl --system

# Reboot the host
reboot
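
After the reboot, a quick sanity check that the settings stuck (expected values are the ones configured above):

getenforce                                  # expect: Disabled
free -h                                     # the Swap line should read 0B
sysctl net.bridge.bridge-nf-call-iptables   # expect: 1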

4. Passwordless SSH (run on the Master node)

# 1. Generate a key pair (run ssh-keygen and press Enter at every prompt)
ssh-keygen

# 2. View and copy the generated public key
cat /root/.ssh/id_rsa.pub

# 3. Log in to each node (worker) and append the pubkey to /root/.ssh/authorized_keys
mkdir -p /root/.ssh
echo "<上一步骤复制的pubkey>" >> /root/.ssh/authorized_keys

# Once set up, test it with: ssh 192.168.1.162
# (only the initial manual logins to write the key require a password)
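
Steps 2 and 3 can also be collapsed into one command per node with ssh-copy-id, which appends the key and sets the permissions for you (it prompts for that node's password once):

ssh-copy-id root@192.168.1.162
ssh-copy-id root@192.168.1.163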

5. Install Docker (run on Master and all Node machines)

1. Check the current kernel version (optional)
uname -r

2. Update system packages (optional)
yum -y update: updates all installed packages, including the kernel
yum -y upgrade: like update, but also removes obsolete packages

3. Remove old Docker versions (optional)
yum remove docker docker-common docker-selinux docker-engine

4. Install the required packages
yum install -y yum-utils device-mapper-persistent-data lvm2

5. Configure the yum repository (choose one)
# official repository
yum-config-manager --add-repo http://download.docker.com/linux/centos/docker-ce.repo

# Aliyun mirror
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

6. List the available docker versions
yum list docker-ce --showduplicates | sort -r

7. Install docker
yum install -y docker-ce docker-ce-cli containerd.io
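
The command above installs the newest docker-ce, which may be newer than the 20.10.11 listed in section 1. To pin that exact version, a versioned install along these lines should work (version strings follow the docker-ce repo's package naming; confirm against the list from step 6):

yum install -y docker-ce-20.10.11 docker-ce-cli-20.10.11 containerd.io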

8. Start docker and enable it at boot
systemctl start docker && systemctl enable docker

9. Configure a registry mirror
mkdir -p /etc/docker
tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://zflya4r5.mirror.aliyuncs.com"]
}
EOF
systemctl daemon-reload
systemctl restart docker
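
The kubeadm documentation recommends the systemd cgroup driver on systemd hosts (with Docker, kubeadm detects the driver and configures the kubelet to match, so this is optional). A variant of the same daemon.json that also sets it:

tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://zflya4r5.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl daemon-reload && systemctl restart docker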

6. Install Kubernetes (run on Master and all Node machines)

# Configure the Aliyun Kubernetes yum repository
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
EOF

# Install the required packages (same as in the Docker section; safe to re-run)
yum -y install yum-utils device-mapper-persistent-data lvm2

# Install kubeadm, kubectl, and kubelet
yum install -y kubelet-1.19.0-0 kubeadm-1.19.0-0 kubectl-1.19.0-0 --disableexcludes=kubernetes

# Start kubelet and enable it at boot (it restarts in a loop until kubeadm init/join runs; that is expected)
systemctl start kubelet 
systemctl enable kubelet
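
A quick check that every node ended up with the same versions:

kubeadm version
kubectl version --client
kubelet --version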

7. Initialize the cluster on the Master node

# Note the --pod-network-cidr value; it is needed again when installing the calico network plugin

kubeadm init --image-repository registry.aliyuncs.com/google_containers --kubernetes-version=v1.19.0 --pod-network-cidr=10.244.0.0/16
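On a host with several network interfaces it can help to pin the API server address explicitly; the same command with the master IP from section 2 added:

kubeadm init --image-repository registry.aliyuncs.com/google_containers --kubernetes-version=v1.19.0 --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=192.168.1.161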

# Configure kubeconfig authentication for kubectl
mkdir -p $HOME/.kube
cp /etc/kubernetes/admin.conf $HOME/.kube/config
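
For a non-root user, kubeadm's init output additionally suggests fixing ownership; as root, exporting the admin config directly also works:

chown $(id -u):$(id -g) $HOME/.kube/config
# or, as root only:
export KUBECONFIG=/etc/kubernetes/admin.conf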

# With the kubeconfig in place you can list the pods; the coredns pods only reach Running after calico is installed
[root@master ~]# kubectl get pods -o wide -n kube-system 
NAME                                       READY   STATUS              RESTARTS   AGE     IP              NODE     NOMINATED NODE   READINESS GATES
coredns-6d56c8448f-p6m54                   0/1     ContainerCreating   0          17m     <none>          master   <none>           <none>
coredns-6d56c8448f-qkc75                   0/1     ContainerCreating   0          17m     <none>          master   <none>           <none>
etcd-master                                1/1     Running             0          17m     192.168.1.161   master   <none>           <none>
kube-apiserver-master                      1/1     Running             0          17m     192.168.1.161   master   <none>           <none>
kube-controller-manager-master             1/1     Running             1          17m     192.168.1.161   master   <none>           <none>
kube-proxy-2sh94                           1/1     Running             0          17m     192.168.1.161   master   <none>           <none>
kube-scheduler-master                      1/1     Running             1          17m     192.168.1.161   master   <none>           <none>

8. Join the Worker nodes to the cluster

# On the Master node, generate the join command for the worker nodes
[root@master ~]# kubeadm token create --print-join-command
kubeadm join 192.168.1.161:6443 --token 1hg3oh.bd5yxayjg8q2lp7y     --discovery-token-ca-cert-hash sha256:fbc33db7d7d70bb0d73a347ae0da86c5c0db9cd450e531d6ddd20a38bbcc0d20

# Run on node01
[root@node01 ~]# kubeadm join 192.168.1.161:6443 --token 1hg3oh.bd5yxayjg8q2lp7y     --discovery-token-ca-cert-hash sha256:fbc33db7d7d70bb0d73a347ae0da86c5c0db9cd450e531d6ddd20a38bbcc0d20

# Run on node02
[root@node02 ~]# kubeadm join 192.168.1.161:6443 --token 1hg3oh.bd5yxayjg8q2lp7y     --discovery-token-ca-cert-hash sha256:fbc33db7d7d70bb0d73a347ae0da86c5c0db9cd450e531d6ddd20a38bbcc0d20

# Check the cluster nodes from the master
[root@master ~]# kubectl get nodes
NAME     STATUS     ROLES    AGE   VERSION
master   NotReady   master   68m   v1.19.0
node01   NotReady   <none>   60m   v1.19.0
node02   NotReady   <none>   60m   v1.19.0

# The nodes report NotReady because no CNI plugin is installed yet; this resolves once calico is deployed below.
# Distribute the master's etcd certificates to all worker nodes
# (etcd's certificates live under /etc/kubernetes/pki/etcd/ by default)
scp -r /etc/kubernetes/pki/etcd root@node01:/etc/kubernetes/pki/etcd
scp -r /etc/kubernetes/pki/etcd root@node02:/etc/kubernetes/pki/etcd

9. Install the calico network plugin

# Download the deployment yaml
[root@master ~]# curl https://docs.projectcalico.org/manifests/calico-etcd.yaml -o calico.yaml
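
The stock calico-etcd.yaml still points at a placeholder etcd endpoint and ships an empty TLS secret, so it has to be wired to the kubeadm etcd before deploying. A sketch of the edits, assuming the placeholder strings in the v3.21 manifest (check your copy) and the kubeadm certificate paths on the master:

# base64-encode the kubeadm etcd TLS material into the calico-etcd-secrets Secret
ETCD_CA=$(base64 -w0 < /etc/kubernetes/pki/etcd/ca.crt)
ETCD_CERT=$(base64 -w0 < /etc/kubernetes/pki/etcd/server.crt)
ETCD_KEY=$(base64 -w0 < /etc/kubernetes/pki/etcd/server.key)
sed -i "s|# etcd-key: null|etcd-key: ${ETCD_KEY}|" calico.yaml
sed -i "s|# etcd-cert: null|etcd-cert: ${ETCD_CERT}|" calico.yaml
sed -i "s|# etcd-ca: null|etcd-ca: ${ETCD_CA}|" calico.yaml
# point the ConfigMap at the real etcd endpoint and the mounted cert paths
sed -i 's|etcd_endpoints: "http://<ETCD_IP>:<ETCD_PORT>"|etcd_endpoints: "https://192.168.1.161:2379"|' calico.yaml
sed -i 's|etcd_ca: ""|etcd_ca: "/calico-secrets/etcd-ca"|' calico.yaml
sed -i 's|etcd_cert: ""|etcd_cert: "/calico-secrets/etcd-cert"|' calico.yaml
sed -i 's|etcd_key: ""|etcd_key: "/calico-secrets/etcd-key"|' calico.yaml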

# Set CALICO_IPV4POOL_CIDR to the cidr defined at cluster initialization above (uncomment it if the manifest ships it commented out)
vim calico.yaml
- name: CALICO_IPV4POOL_CIDR
  value: "10.244.0.0/16"

# Change defaultMode: 0400 to defaultMode: 0040
volumes:
  # Mount in the etcd TLS secrets with mode 400.
  # See https://kubernetes.io/docs/concepts/configuration/secret/
  - name: etcd-certs
    secret:
      secretName: calico-etcd-secrets
      defaultMode: 0040
      #defaultMode: 0400

# Deploy
[root@master ~]# kubectl apply -f calico.yaml
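
The rollout can be watched until the calico pods and coredns come up:

kubectl -n kube-system rollout status daemonset/calico-node
kubectl -n kube-system get pods -o wide -w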

10. Check the Pods

[root@master ~]# kubectl get pods -o wide -n kube-system
NAME                                       READY   STATUS    RESTARTS   AGE   IP              NODE     NOMINATED NODE   READINESS GATES
calico-kube-controllers-7498f6cbdd-67t2r   1/1     Running   7          69m   192.168.1.161   master   <none>           <none>
calico-node-655wn                          1/1     Running   0          57m   192.168.1.163   node02   <none>           <none>
calico-node-7hgzv                          1/1     Running   0          57m   192.168.1.162   node01   <none>           <none>
calico-node-bqrdp                          1/1     Running   0          57m   192.168.1.161   master   <none>           <none>
coredns-6d56c8448f-p6m54                   1/1     Running   0          81m   10.244.219.66   master   <none>           <none>
coredns-6d56c8448f-qkc75                   1/1     Running   0          81m   10.244.219.65   master   <none>           <none>
etcd-master                                1/1     Running   0          81m   192.168.1.161   master   <none>           <none>
kube-apiserver-master                      1/1     Running   0          81m   192.168.1.161   master   <none>           <none>
kube-controller-manager-master             1/1     Running   2          81m   192.168.1.161   master   <none>           <none>
kube-proxy-2sh94                           1/1     Running   0          81m   192.168.1.161   master   <none>           <none>
kube-proxy-8z9zc                           1/1     Running   0          73m   192.168.1.163   node02   <none>           <none>
kube-proxy-jfxjt                           1/1     Running   0          73m   192.168.1.162   node01   <none>           <none>
kube-scheduler-master                      1/1     Running   2          81m   192.168.1.161   master   <none>           <none>
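
With calico running, the nodes seen in section 8 should also have flipped from NotReady to Ready:

kubectl get nodes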
