本次环境这里添加了一个k8s-05 ip为192.168.0.14的新node节点,添加多个node节点的步骤和方式相同,这里不一一举例

没有特殊说明的都在k8s-01上进行操作

首先我们需要修改host主机,添加k8s-05

 
  1. #在之前的master节点添加
  2. echo "192.168.0.14 k8s-05" >>/etc/hosts
  3. #在k8s-05操作
  4. cat >>/etc/hosts<<EOF
  5. 192.168.0.10 k8s-01
  6. 192.168.0.11 k8s-02
  7. 192.168.0.12 k8s-03
  8. 192.168.0.13 k8s-04
  9. EOF

检查host解析

 
  1. [root@k8s-01 ~]# ping -c 1 k8s-05
  2. PING k8s-05 (192.168.0.14) 56(84) bytes of data.
  3. 64 bytes from k8s-05 (192.168.0.14): icmp_seq=1 ttl=64 time=0.910 ms
  4. --- k8s-05 ping statistics ---
  5. 1 packets transmitted, 1 received, 0% packet loss, time 0ms
  6. rtt min/avg/max/mdev = 0.910/0.910/0.910/0.000 ms

分发密钥

 
  1. cd
  2. ssh-copy-id -i ~/.ssh/id_rsa.pub k8s-05
  3. #为了后面拷贝证书等文件方便快捷,这里继续分发密钥

在k8s-05节点创建K8s相关目录

 
  1. mkdir -p /opt/k8s/{bin,work} /etc/{kubernetes,etcd}/cert

推送CA证书

 
  1. cd /etc/kubernetes/cert
  2. scp ca.pem ca-config.json k8s-05:/etc/kubernetes/cert/

flanneld部署

 
  1. cd /opt/k8s/work/flannel
  2. scp flanneld mk-docker-opts.sh k8s-05:/opt/k8s/bin/

拷贝flanneld密钥

 
  1. ssh k8s-05 "mkdir -p /etc/flanneld/cert"
  2. scp /etc/flanneld/cert/flanneld*.pem k8s-05:/etc/flanneld/cert

拷贝flannel启动文件

 
  1. scp /etc/systemd/system/flanneld.service k8s-05:/etc/systemd/system/
  2. #启动flannel
  3. ssh k8s-05 "systemctl daemon-reload && systemctl enable flanneld && systemctl restart flanneld"
  4. #检查是否启动成功
  5. ssh k8s-05 "systemctl status flanneld|grep Active"

查看etcd网络数据

新增flannel会注册到etcd

 
  1. source /opt/k8s/bin/environment.sh
  2. etcdctl \
  3. --endpoints=${ETCD_ENDPOINTS} \
  4. --ca-file=/etc/kubernetes/cert/ca.pem \
  5. --cert-file=/etc/flanneld/cert/flanneld.pem \
  6. --key-file=/etc/flanneld/cert/flanneld-key.pem \
  7. ls ${FLANNEL_ETCD_PREFIX}/subnets

正常结果如下

image_1e12n9sd91d1v8c18rj1dl11jqr9.png-197.9kB

上面的步骤结束后我们flannel网络就设置完毕


Kubernetes Node 节点安装Docker

这里我们直接在k8s-05节点安装docker

 
  1. yum install -y yum-utils device-mapper-persistent-data lvm2
  2. yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
  3. yum install -y conntrack ntpdate ntp ipvsadm ipset jq iptables curl sysstat libseccomp wget
  4. yum makecache fast
  5. yum -y install docker-ce

创建配置文件

 
  1. mkdir -p /etc/docker/
  2. cat > /etc/docker/daemon.json <<EOF
  3. {
  4. "exec-opts": ["native.cgroupdriver=systemd"],
  5. "registry-mirrors": ["https://hjvrgh7a.mirror.aliyuncs.com"],
  6. "log-driver": "json-file",
  7. "log-opts": {
  8. "max-size": "100m"
  9. },
  10. "storage-driver": "overlay2"
  11. }
  12. EOF

修改配置文件

 
  1. vim /usr/lib/systemd/system/docker.service

image_1e12nfvlu1lcc139m1sj167l1ufkm.png-182.8kB

在配置文件14行删除原来并添加下面的参数

 
  1. EnvironmentFile=-/run/flannel/docker
  2. ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS -H fd:// --containerd=/run/containerd/containerd.sock

启动docker并检查服务状态

 
  1. systemctl daemon-reload && systemctl enable docker && systemctl restart docker
  2. systemctl status docker|grep Active

查看docker0网桥是否正常

 
  1. [root@k8s-05 ~]# ip addr show flannel.1 && /usr/sbin/ip addr show docker0
  2. 3: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group default
  3. link/ether 1a:c7:47:3a:f5:4e brd ff:ff:ff:ff:ff:ff
  4. inet 172.30.64.0/32 scope global flannel.1
  5. valid_lft forever preferred_lft forever
  6. inet6 fe80::18c7:47ff:fe3a:f54e/64 scope link
  7. valid_lft forever preferred_lft forever
  8. 4: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
  9. link/ether 02:42:99:c3:03:9d brd ff:ff:ff:ff:ff:ff
  10. inet 172.30.64.1/21 brd 172.30.71.255 scope global docker0
  11. valid_lft forever preferred_lft forever

安装kubelet

创建kubelet bootstrap kubeconfig文件

 
  1. #k8s-01操作
  2. cd /opt/k8s/work
  3. export BOOTSTRAP_TOKEN=$(kubeadm token create \
  4. --description kubelet-bootstrap-token \
  5. --groups system:bootstrappers:k8s-05 \
  6. --kubeconfig ~/.kube/config)
  7. # 设置集群参数
  8. kubectl config set-cluster kubernetes \
  9. --certificate-authority=/etc/kubernetes/cert/ca.pem \
  10. --embed-certs=true \
  11. --server=https://192.168.0.100:8443 \
  12. --kubeconfig=kubelet-bootstrap-k8s-05.kubeconfig
  13. # 设置客户端认证参数
  14. kubectl config set-credentials kubelet-bootstrap \
  15. --token=${BOOTSTRAP_TOKEN} \
  16. --kubeconfig=kubelet-bootstrap-k8s-05.kubeconfig
  17. # 设置上下文参数
  18. kubectl config set-context default \
  19. --cluster=kubernetes \
  20. --user=kubelet-bootstrap \
  21. --kubeconfig=kubelet-bootstrap-k8s-05.kubeconfig
  22. # 设置默认上下文
  23. kubectl config use-context default --kubeconfig=kubelet-bootstrap-k8s-05.kubeconfig

分发kubeconfig

 
  1. cd /opt/k8s/work
  2. scp kubelet-bootstrap-k8s-05.kubeconfig k8s-05:/etc/kubernetes/kubelet-bootstrap.kubeconfig

查看kubeadm为各个节点创建的token

 
  1. $ kubeadm token list --kubeconfig ~/.kube/config
  2. TOKEN TTL EXPIRES USAGES DESCRIPTION EXTRA GROUPS
  3. 2bd2l8.48aqiyi70ilmyapd 23h 2020-02-15T15:18:04-05:00 authentication,signing kubelet-bootstrap-token system:bootstrappers:k8s-03
  4. 2juok7.m3ovzxlplynkidg2 23h 2020-02-15T15:18:04-05:00 authentication,signing kubelet-bootstrap-token system:bootstrappers:k8s-02
  5. 4tco8p.fnzj1yfvsx5hkf2e 23h 2020-02-15T15:18:05-05:00 authentication,signing kubelet-bootstrap-token system:bootstrappers:k8s-04
  6. 8kw20m.ehj3git0b2e1bwkc 23h 2020-02-15T15:17:56-05:00 authentication,signing kubelet-bootstrap-token system:bootstrappers:k8s-01
  7. j1olh5.qzktcctz5kcaywqk 23h 2020-02-15T16:02:01-05:00 authentication,signing kubelet-bootstrap-token system:bootstrappers:k8s-05
  8. kktiu3.3a1adkatjo4zjuqh 23h 2020-02-15T15:17:57-05:00 authentication,signing kubelet-bootstrap-token system:bootstrappers:k8s-02
  9. nzkb4r.g63nm9qqbq2e474q 23h 2020-02-15T15:18:04-05:00 authentication,signing kubelet-bootstrap-token system:bootstrappers:k8s-01
  10. #目前这里我们已经可以看到K8s-05节点的信息

查看各token关联的Secret

 
  1. $ kubectl get secrets -n kube-system|grep bootstrap-token
  2. bootstrap-token-2bd2l8 bootstrap.kubernetes.io/token 7 47m
  3. bootstrap-token-2juok7 bootstrap.kubernetes.io/token 7 47m
  4. bootstrap-token-4tco8p bootstrap.kubernetes.io/token 7 47m
  5. bootstrap-token-8kw20m bootstrap.kubernetes.io/token 7 47m
  6. bootstrap-token-j1olh5 bootstrap.kubernetes.io/token 7 3m29s
  7. bootstrap-token-kktiu3 bootstrap.kubernetes.io/token 7 47m
  8. bootstrap-token-nzkb4r bootstrap.kubernetes.io/token 7 47m
  9. #可以看到已经有一个新建的

创建和分发kubelet参数配置

 
  1. cd /opt/k8s/work
  2. sed -e "s/##NODE_IP##/192.168.0.14/" kubelet-config.yaml.template > kubelet-config-192.168.0.14.yaml.template
  3. scp kubelet-config-192.168.0.14.yaml.template root@k8s-05:/etc/kubernetes/kubelet-config.yaml

拷贝kubelet启动文件

 
  1. cd /opt/k8s/work
  2. source /opt/k8s/bin/environment.sh
  3. sed -e "s/##NODE_NAME##/k8s-05/" kubelet.service.template > kubelet-k8s-05.service
  4. scp kubelet-k8s-05.service root@k8s-05:/etc/systemd/system/kubelet.service

拷贝kubelet命令

 
  1. scp /opt/k8s/bin/kubelet k8s-05:/opt/k8s/bin/

启动kubelet

 
  1. cd /opt/k8s/work
  2. source /opt/k8s/bin/environment.sh
  3. ssh root@k8s-05 "mkdir -p ${K8S_DIR}/kubelet/kubelet-plugins/volume/exec/"
  4. ssh root@k8s-05 "/usr/sbin/swapoff -a"
  5. ssh root@k8s-05 "systemctl daemon-reload && systemctl enable kubelet && systemctl restart kubelet"

手动approve server cert csr

稍等片刻后,需要手动通过证书请求

基于安全考虑,CSR approving controllers不会自动approve kubelet server证书签名请求,需要手动approve

 
  1. kubectl get csr | grep Pending | awk '{print $1}' | xargs kubectl certificate approve

当我们再次查看节点

 
  1. [root@k8s-01 work]# kubectl get node
  2. NAME STATUS ROLES AGE VERSION
  3. k8s-01 Ready 57m v1.14.2
  4. k8s-02 Ready 57m v1.14.2
  5. k8s-03 Ready 57m v1.14.2
  6. k8s-04 Ready 57m v1.14.2
  7. k8s-05 Ready 51s v1.14.2

安装kube-proxy

此处均在 k8s-01 上执行

推送kube-proxy二进制启动文件

 
  1. cd /opt/k8s/work/
  2. scp kubernetes/server/bin/kube-proxy k8s-05:/opt/k8s/bin/

分发kubeconfig文件

 
  1. cd /opt/k8s/work/
  2. scp kube-proxy.kubeconfig root@k8s-05:/etc/kubernetes/

分发和创建kube-proxy配置文件

 
  1. cd /opt/k8s/work/
  2. sed -e "s/##NODE_NAME##/k8s-05/" -e "s/##NODE_IP##/192.168.0.14/" kube-proxy-config.yaml.template > kube-proxy-config-k8s-05.yaml.template
  3. scp kube-proxy-config-k8s-05.yaml.template root@k8s-05:/etc/kubernetes/kube-proxy-config.yaml

分发kube-proxy systemd unit文件

 
  1. scp kube-proxy.service root@k8s-05:/etc/systemd/system/

启动kube-proxy服务

 
  1. cd /opt/k8s/work
  2. source /opt/k8s/bin/environment.sh
  3. ssh root@k8s-05 "mkdir -p ${K8S_DIR}/kube-proxy"
  4. ssh root@k8s-05 "modprobe ip_vs_rr"
  5. ssh root@k8s-05 "systemctl daemon-reload && systemctl enable kube-proxy && systemctl restart kube-proxy"

检查启动结果

 
  1. ssh root@k8s-05 "systemctl status kube-proxy|grep Active"
  2. ssh root@k8s-05 "netstat -lnpt|grep kube-prox"

检查ipvs路由规则

 
  1. ssh root@k8s-05 "/usr/sbin/ipvsadm -ln"

这里所有节点安装完毕,可以创建Pod进行测试

 
  1. cd /opt/k8s/work
  2. cat > nginx-ds.yml <<EOF
  3. apiVersion: v1
  4. kind: Service
  5. metadata:
  6. name: nginx-ds
  7. labels:
  8. app: nginx-ds
  9. spec:
  10. type: NodePort
  11. selector:
  12. app: nginx-ds
  13. ports:
  14. - name: http
  15. port: 80
  16. targetPort: 80
  17. ---
  18. apiVersion: extensions/v1beta1
  19. kind: DaemonSet
  20. metadata:
  21. name: nginx-ds
  22. labels:
  23. addonmanager.kubernetes.io/mode: Reconcile
  24. spec:
  25. template:
  26. metadata:
  27. labels:
  28. app: nginx-ds
  29. spec:
  30. containers:
  31. - name: my-nginx
  32. image: daocloud.io/library/nginx:1.13.0-alpine
  33. ports:
  34. - containerPort: 80
  35. EOF
  36. kubectl apply -f /opt/k8s/work/nginx-ds.yml

检查结果

 
  1. kubectl get pod -o wide
  2. NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
  3. busybox 1/1 Running 1 63m 172.30.88.4 k8s-01
  4. nginx-ds-25w2v 1/1 Running 0 65m 172.30.104.2 k8s-03
  5. nginx-ds-2b6mn 1/1 Running 0 65m 172.30.240.2 k8s-04
  6. nginx-ds-6rsm4 1/1 Running 0 12m 172.30.64.2 k8s-05
  7. nginx-ds-n58rv 1/1 Running 0 65m 172.30.88.2 k8s-01
  8. nginx-ds-zvnx2 1/1 Running 0 65m 172.30.184.2 k8s-02

Related posts:

  1. Kubernetes 1.14 二进制集群安装
  2. Kubenetes 1.13.5 集群二进制安装
  3. Kuerbernetes 1.11 集群二进制安装
  4. CentOS 7 ETCD集群配置大全

转载至https://i4t.com/4508.html?__cf_chl_jschl_tk__=pmd_3eea18243279db7905c67ccd1a24b0c63a826b6c-1627395006-0-gqNtZGzNAc2jcnBszQX6

Logo

K8S/Kubernetes社区为您提供最前沿的新闻资讯和知识内容

更多推荐