K8s cluster deployment


kubeadm is a tool released by the official Kubernetes community for quickly deploying a Kubernetes cluster.

With just two commands it can stand up a working cluster:

# Create a master (control-plane) node
$ kubeadm init

# Join a worker node to the cluster
$ kubeadm join <master IP>:<port>

Official website: Kubernetes

Official documentation: Kubernetes Documentation | Kubernetes

Environment:

Host                IP              Disk
master / CentOS 7   192.168.220.17  30 GB
node1  / CentOS 7   192.168.220.20  30 GB
node2  / CentOS 7   192.168.220.21  30 GB
Before you begin
  • A compatible Linux host. The Kubernetes project provides generic instructions for Debian-based and Red Hat-based Linux distributions, as well as distributions without a package manager.
  • 2 GB or more of RAM per machine (any less leaves little room for your applications).
  • 2 or more CPUs.
  • Full network connectivity between all machines in the cluster (a public or private network is fine).
  • Unique hostname, MAC address, and product_uuid for every node. See here for more details.
  • Certain ports open on the machines. See here for more details.
  • Swap disabled. You MUST disable swap for the kubelet to work properly (a quick check for these last three items is sketched right after this list).
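A minimal verification sketch for the uniqueness, port, and swap requirements above (assuming standard CentOS tooling; run on each host):

# MAC addresses and product_uuid must be unique across nodes
ip link show | awk '/link\/ether/ {print $2}'
cat /sys/class/dmi/id/product_uuid

# no listener should already occupy the ports Kubernetes needs (e.g. 6443, 10250)
ss -antl | grep -E ':(6443|10250)\s'

# swap must be off: the Swap total should read 0
free -m | awk '/^Swap/ {print $2}'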

Preparation

# Set the hostname on each of the three hosts
[root@localhost ~]# hostnamectl set-hostname master.example.com
[root@localhost ~]# bash
[root@master ~]#

[root@localhost ~]# hostnamectl set-hostname node1.example.com
[root@localhost ~]# bash
[root@node1 ~]#

[root@localhost ~]# hostnamectl set-hostname node2.example.com
[root@localhost ~]# bash
[root@node2 ~]#


# Disable firewalld and SELinux on all three hosts
[root@master ~]# systemctl disable firewalld
Removed /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.

[root@master ~]# sed -ri 's/(SELINUX=).*/\1disabled/g' /etc/selinux/config
[root@master ~]# setenforce 0
[root@master ~]# getenforce 
Permissive
[root@master ~]# reboot
[root@master ~]# getenforce 
Disabled


# Remove or comment out the swap entry on all three hosts
[root@master ~]# free -m    # before disabling swap
              total        used        free      shared  buff/cache   available
Mem:           2797         241        2342          16         213        2386
Swap:          3071           0        3071

[root@master ~]# cat /etc/fstab 
#
# /etc/fstab
# Created by anaconda on Sat Dec 18 13:26:48 2021
#
# Accessible filesystems, by reference, are maintained under '/dev/disk/'.
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info.
#
# After editing this file, run 'systemctl daemon-reload' to update systemd
# units generated from this file.
#
/dev/mapper/cs-root     /                       xfs     defaults        0 0
UUID=67bfc26e-88da-45ec-914c-3f28ec9571fb /boot                   xfs     defaults        0 0
#/dev/mapper/cs-swap     none                    swap    defaults        0 0                  # deleted or commented out

[root@master ~]# reboot
[root@master ~]# free -m   # after disabling swap
              total        used        free      shared  buff/cache   available
Mem:           1789         211        1359           8         218        1416
Swap:             0           0           0   
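Rebooting is one way to apply the change; an equivalent sketch that turns swap off immediately and comments out the fstab entry in one step (assuming the entry contains the word "swap"):

swapoff -a                                          # disable all active swap right away
sed -ri 's/^([^#].*\sswap\s.*)$/#\1/' /etc/fstab    # comment out the swap line so it stays off after reboot
free -m                                             # the Swap total should now read 0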

Add hostname resolution on the master

[root@master ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.220.17 master master.example.com
192.168.220.20 node1 node1.example.com
192.168.220.21 node2 node2.example.com


[root@master ~]# ping master
PING master (192.168.220.17) 56(84) bytes of data.
64 bytes from master (192.168.220.17): icmp_seq=1 ttl=64 time=0.020 ms
64 bytes from master (192.168.220.17): icmp_seq=2 ttl=64 time=0.022 ms
^Z
[1]+  Stopped                 ping master

[root@master ~]# ping node1
PING node1 (192.168.220.20) 56(84) bytes of data.
64 bytes from node1 (192.168.220.20): icmp_seq=1 ttl=64 time=0.325 ms
64 bytes from node1 (192.168.220.20): icmp_seq=2 ttl=64 time=0.422 ms
^Z
[2]+  Stopped                 ping node1


[root@master ~]# ping node2
PING node2 (192.168.220.21) 56(84) bytes of data.
64 bytes from node2 (192.168.220.21): icmp_seq=1 ttl=64 time=0.320 ms
64 bytes from node2 (192.168.220.21): icmp_seq=2 ttl=64 time=0.228 ms
^Z
[3]+  Stopped                 ping node2
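The kubeadm join warnings later ("hostname node1.example.com could not be reached") show what happens when the nodes cannot resolve these names themselves, so the same entries are usually added on node1 and node2 as well; a sketch (run on each node):

cat >> /etc/hosts << EOF
192.168.220.17 master master.example.com
192.168.220.20 node1 node1.example.com
192.168.220.21 node2 node2.example.com
EOF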

On all three hosts, pass bridged IPv4 traffic to the iptables chains:

[root@master ~]# cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF


[root@master ~]# cat /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1


[root@master ~]# sysctl --system
* Applying /usr/lib/sysctl.d/10-default-yama-scope.conf ...
kernel.yama.ptrace_scope = 0
* Applying /usr/lib/sysctl.d/50-coredump.conf ...
kernel.core_pattern = |/usr/lib/systemd/systemd-coredump %P %u %g %s %t %c %h %e
* Applying /usr/lib/sysctl.d/50-default.conf ...
kernel.sysrq = 16
kernel.core_uses_pid = 1
kernel.kptr_restrict = 1
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.all.accept_source_route = 0
net.ipv4.conf.all.promote_secondaries = 1
net.core.default_qdisc = fq_codel
fs.protected_hardlinks = 1
fs.protected_symlinks = 1
* Applying /usr/lib/sysctl.d/50-libkcapi-optmem_max.conf ...
net.core.optmem_max = 81920
* Applying /usr/lib/sysctl.d/50-pid-max.conf ...
kernel.pid_max = 4194304
* Applying /etc/sysctl.d/99-sysctl.conf ...
* Applying /etc/sysctl.d/k8s.conf ...
* Applying /etc/sysctl.conf ...
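The net.bridge.bridge-nf-call-* keys only exist once the br_netfilter kernel module is loaded, so loading it explicitly (and persisting it across reboots) is a common companion step; this is a sketch added here, not part of the original procedure:

modprobe br_netfilter                               # load the module now
echo br_netfilter > /etc/modules-load.d/k8s.conf    # load it automatically at boot
sysctl --system                                     # re-apply; the two k8s.conf keys should now be listed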

Install a time synchronization service on all three hosts

[root@master ~]# dnf -y install vim wget chrony

[root@node1 ~]# dnf -y install vim wget chrony

[root@node2 ~]# dnf -y install vim wget chrony

# On all three hosts, switch time synchronization to Aliyun
[root@master ~]# cat /etc/chrony.conf 
# Use public servers from the pool.ntp.org project.
# Please consider joining the pool (http://www.pool.ntp.org/join.html).
pool time1.aliyun.com iburst                                            # changed to pool time1.aliyun.com iburst

# Record the rate at which the system clock gains/losses time.
driftfile /var/lib/chrony/drift

# Allow the system clock to be stepped in the first three updates
# if its offset is larger than 1 second.
makestep 1.0 3

# Enable kernel synchronization of the real-time clock (RTC).
rtcsync

# Enable hardware timestamping on all interfaces that support it.
#hwtimestamp *

# Increase the minimum number of selectable sources required to adjust
# the system clock.
#minsources 2

# Allow NTP client access from local network.
#allow 192.168.0.0/16

# Serve time even if not synchronized to a time source.
#local stratum 10

# Specify file containing keys for NTP authentication.
keyfile /etc/chrony.keys

# Get TAI-UTC offset and leap seconds from the system tz database.
leapsectz right/UTC

# Specify directory for log files.
logdir /var/log/chrony

# Select which information is logged.
#log measurements statistics tracking

[root@master ~]# systemctl enable --now chronyd
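A quick check that chrony actually selected the Aliyun source (standard chronyc commands):

[root@master ~]# chronyc sources -v    # the line marked '^*' is the server currently synchronized to
[root@master ~]# chronyc tracking      # current offset, stratum, and reference ID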

Set up passwordless SSH logins from the master

[root@master ~]# ssh-keygen -t rsa
Generating public/private rsa key pair.

Enter file in which to save the key (/root/.ssh/id_rsa): Created directory '/root/.ssh'.
Enter passphrase (empty for no passphrase): 
Enter same passphrase again: 
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:dYbSEiqrqFYLgd4UggXxWJF81kftQKKDs/OoBrBoypU root@master.example.com
The key's randomart image is:
+---[RSA 3072]----+
|+=+o ..o+.       |
|.=ooo..ooo..     |
|o =o= ..oo+ o    |
|o. + +   +.o     |
|+.* o   S        |
|+= E             |
|* * o            |
|o= .             |
|=                |
+----[SHA256]-----+


[root@master ~]# ssh-copy-id master
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
The authenticity of host 'master (192.168.220.17)' can't be established.
ECDSA key fingerprint is SHA256:+w2iu/jKxDt9j9X0LelVpearhiefBgd+vm7AntCUiGo.
Are you sure you want to continue connecting (yes/no/[fingerprint])? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@master's password: 

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'master'"
and check to make sure that only the key(s) you wanted were added.


[root@master ~]# ssh-copy-id node1
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
The authenticity of host 'node1 (192.168.220.20)' can't be established.
ECDSA key fingerprint is SHA256:Kv8kDJNeSd2AjUNVDTPmvrvCAXL7GNUKWHUYNoIfSHo.
Are you sure you want to continue connecting (yes/no/[fingerprint])? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@node1's password: 

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'node1'"
and check to make sure that only the key(s) you wanted were added.


[root@master ~]# ssh-copy-id node2
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
The authenticity of host 'node2 (192.168.220.21)' can't be established.
ECDSA key fingerprint is SHA256:UlA5inIMH+HDVNyu7eeFEwSE/hFSPS3DNqY6uE2do88.
Are you sure you want to continue connecting (yes/no/[fingerprint])? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@node2's password: 

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'node2'"
and check to make sure that only the key(s) you wanted were added.

Check that the time is consistent across the three hosts

[root@master ~]# for i in master node1 node2;do ssh $i 'date';done
Sat Dec 18 01:37:40 EST 2021
Sat Dec 18 01:37:40 EST 2021
Sat Dec 18 01:37:40 EST 2021

Reboot all three hosts so that all of the configuration above takes effect, then re-check.
init 6 / reboot

Install Docker on all three hosts

Installing docker-ce also pulls in containerd. Kubernetes 1.24+ no longer talks to Docker directly (dockershim was removed), so containerd, configured later in this guide, is the container runtime the cluster actually uses; Docker itself remains useful for building and inspecting images.

# Download the Docker repository file
[root@master ~]# wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo

# Verify
[root@master ~]# ls /etc/yum.repos.d/
CentOS-Stream-AppStream.repo  CentOS-Stream-Extras.repo            CentOS-Stream-PowerTools.repo
CentOS-Stream-BaseOS.repo     CentOS-Stream-HighAvailability.repo  CentOS-Stream-RealTime.repo
CentOS-Stream-Debuginfo.repo  CentOS-Stream-Media.repo             docker-ce.repo

# Install Docker
[root@master ~]# yum -y install docker-ce


# Configure the registry mirror on all three hosts. JSON does not allow comments, so keep
# the explanations out of the file itself, otherwise Docker may fail to start:
#   registry-mirrors : Aliyun registry mirror (pull accelerator)
#   exec-opts        : use the systemd cgroup driver
#   log-driver/opts  : json-file logging, rotated at 100 MB per file
#   storage-driver   : overlay2
[root@master ~]# cat > /etc/docker/daemon.json << EOF
{
  "registry-mirrors": ["https://wn5c7d7w.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2"
}
EOF

# Enable Docker now and at boot
systemctl daemon-reload
systemctl enable --now docker

# Check Docker's status
systemctl status docker

# Check the Docker version
[root@master ~]# docker --version
Docker version 20.10.23, build 7155243

# Confirm the registry mirror took effect
[root@master ~]# docker info
Client:
 Context:    default
 Debug Mode: false
 Plugins:
  app: Docker App (Docker Inc., v0.9.1-beta3)
  buildx: Docker Buildx (Docker Inc., v0.10.0-docker)
  scan: Docker Scan (Docker Inc., v0.23.0)

Server:
 Containers: 0
  Running: 0
  Paused: 0
  Stopped: 0
 Images: 0
 Server Version: 20.10.23
 Storage Driver: overlay2
  Backing Filesystem: xfs
  Supports d_type: true
  Native Overlay Diff: true
  userxattr: false
 Logging Driver: json-file
 Cgroup Driver: systemd
 Cgroup Version: 1
 Plugins:
  Volume: local
  Network: bridge host ipvlan macvlan null overlay
  Log: awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog
 Swarm: inactive
 Runtimes: io.containerd.runc.v2 io.containerd.runtime.v1.linux runc
 Default Runtime: runc
 Init Binary: docker-init
 containerd version: 5b842e528e99d4d4c1686467debf2bd4b88ecd86
 runc version: v1.1.4-0-g5fd4c4d
 init version: de40ad0
 Security Options:
  seccomp
   Profile: default
 Kernel Version: 3.10.0-957.el7.x86_64
 Operating System: CentOS Linux 7 (Core)
 OSType: linux
 Architecture: x86_64
 CPUs: 2
 Total Memory: 2.919GiB
 Name: k8s-master
 ID: B4YS:NXWZ:NZHV:NCOO:Q52I:5XSL:T4HK:6UC2:YFGI:KDMI:I5UP:KJ6X
 Docker Root Dir: /var/lib/docker
 Debug Mode: false
 Registry: https://index.docker.io/v1/
 Labels:
 Experimental: false
 Insecure Registries:
  127.0.0.0/8
 Registry Mirrors:
  https://wn5c7d7w.mirror.aliyuncs.com/   # the mirror is in effect
 Live Restore Enabled: false

Add the Aliyun Kubernetes YUM repository on all three hosts

Source: the Kubernetes mirror page on the Alibaba Cloud open source mirror site (aliyun.com)

# On all three hosts, configure kubernetes.repo and install kubelet, kubeadm, and kubectl

cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# Clear and rebuild the yum cache, then install
yum clean all && yum makecache

# Releases move quickly, so pin the version explicitly:
yum install -y kubelet-1.25.0 kubeadm-1.25.0 kubectl-1.25.0

# Only enable kubelet at boot; do not start it yet
systemctl enable kubelet
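A quick sanity check of the installed versions (standard CLI flags):

kubeadm version -o short    # expect v1.25.0
kubelet --version           # expect Kubernetes v1.25.0
kubectl version --client    # expect v1.25.0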

containerd configuration
To make sure that cluster initialization and joining the cluster succeed later, the containerd configuration file /etc/containerd/config.toml needs to be adjusted. Do this on every node.

containerd config default > /etc/containerd/config.toml
vi /etc/containerd/config.toml
change sandbox_image = "registry.k8s.io/pause:3.6"  to  sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.8"

systemctl restart containerd

# The default image registry registry.k8s.io is unreachable from mainland China, so the Aliyun mirror registry is specified instead.
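A non-interactive way to make the same sandbox_image change (a sed sketch over the config.toml generated above):

sed -ri 's#sandbox_image = ".*"#sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.8"#' /etc/containerd/config.toml
grep sandbox_image /etc/containerd/config.toml    # verify the new value
systemctl restart containerd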

Deploy the Kubernetes control plane on the master

Run this on 192.168.220.17 (the master).

# --apiserver-advertise-address : the master's IP
# --kubernetes-version          : the version installed above
# --service-cidr / --pod-network-cidr : must not overlap the host network; the pod CIDR must match the CNI configuration (flannel's default is 10.244.0.0/16)
[root@master ~]# kubeadm init --apiserver-advertise-address 192.168.220.17 \
  --image-repository registry.aliyuncs.com/google_containers \
  --kubernetes-version v1.25.0 \
  --service-cidr=10.96.0.0/12 \
  --pod-network-cidr=10.244.0.0/16

[init] Using Kubernetes version: v1.25.0
[preflight] Running pre-flight checks
        [WARNING Service-Docker]: docker service is not enabled, please run 'systemctl enable docker.service'
        [WARNING FileExisting-tc]: tc not found in system path
        [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
.........

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.220.17:6443 --token z9bkz4.8zl0ca032qqg3qwu \          # save this in a file; it will be needed later
        --discovery-token-ca-cert-hash sha256:2382b876b896591aeff33c2df6bf250a28d54e9b4628839dd40ed4d98e7ac3ca
        
[root@master ~]# cat init 
kubeadm join 192.168.220.17:6443 --token z9bkz4.8zl0ca032qqg3qwu \
        --discovery-token-ca-cert-hash sha256:2382b876b896591aeff33c2df6bf250a28d54e9b4628839dd40ed4d98e7ac3ca 
        
# Set the environment variable so the kubectl tool can be used
[root@master ~]# echo 'export KUBECONFIG=/etc/kubernetes/admin.conf' > /etc/profile.d/k8s.sh
[root@master ~]# source /etc/profile.d/k8s.sh

# List the pods
[root@master ~]# kubectl get pods -n kube-system -o wide
NAME                                 READY   STATUS    RESTARTS      AGE    IP               NODE         NOMINATED NODE   READINESS GATES
coredns-c676cc86f-bs478              1/1     Running   1 (64m ago)   2d1h   10.244.0.4       k8s-master   <none>           <none>
coredns-c676cc86f-vcbjg              1/1     Running   1 (64m ago)   2d1h   10.244.0.5       k8s-master   <none>           <none>
etcd-k8s-master                      1/1     Running   2 (64m ago)   2d1h   192.168.220.30   k8s-master   <none>           <none>
kube-apiserver-k8s-master            1/1     Running   2 (64m ago)   2d1h   192.168.220.30   k8s-master   <none>           <none>
kube-controller-manager-k8s-master   1/1     Running   2 (64m ago)   2d1h   192.168.220.30   k8s-master   <none>           <none>
kube-proxy-f8jqt                     1/1     Running   1 (64m ago)   2d1h   192.168.220.30   k8s-master   <none>           <none>
kube-proxy-t2j2w                     1/1     Running   1 (64m ago)   2d1h   192.168.220.31   k8s-node01   <none>           <none>
kube-proxy-w8dl2                     1/1     Running   1 (64m ago)   2d1h   192.168.220.32   k8s-node02   <none>           <none>
kube-scheduler-k8s-master            1/1     Running   2 (64m ago)   2d1h   192.168.220.30   k8s-master   <none>           <none>




# Check the listening ports
[root@master ~]# ss -antl
State          Recv-Q         Send-Q                  Local Address:Port                    Peer Address:Port         Process         
LISTEN         0              128                         127.0.0.1:10248                        0.0.0.0:*                            
LISTEN         0              128                         127.0.0.1:10249                        0.0.0.0:*                            
LISTEN         0              128                    192.168.220.17:2379                         0.0.0.0:*                            
LISTEN         0              128                         127.0.0.1:2379                         0.0.0.0:*                            
LISTEN         0              128                    192.168.220.17:2380                         0.0.0.0:*                            
LISTEN         0              128                         127.0.0.1:2381                         0.0.0.0:*                            
LISTEN         0              128                         127.0.0.1:10257                        0.0.0.0:*                            
LISTEN         0              128                         127.0.0.1:10259                        0.0.0.0:*                            
LISTEN         0              128                         127.0.0.1:43669                        0.0.0.0:*                            
LISTEN         0              128                           0.0.0.0:22                           0.0.0.0:*                            
LISTEN         0              128                                 *:10250                              *:*                            
LISTEN         0              128                                 *:6443                               *:*                            
LISTEN         0              128                                 *:10256                              *:*                            
LISTEN         0              128                              [::]:22                              [::]:*

# The required images can also be pulled manually in advance
[root@master ~]# kubeadm config print init-defaults > k8simagespull.yml
[root@master ~]# ls
anaconda-ks.cfg  init  k8simagespull.yml

[root@master ~]# vim k8simagespull.yml  # change imageRepository: to the Aliyun registry

[root@master ~]# kubeadm config images list --config k8simagespull.yml   # list the images
registry.aliyuncs.com/google_containers/kube-apiserver:v1.25.0
registry.aliyuncs.com/google_containers/kube-controller-manager:v1.25.0
registry.aliyuncs.com/google_containers/kube-scheduler:v1.25.0
registry.aliyuncs.com/google_containers/kube-proxy:v1.25.0
registry.aliyuncs.com/google_containers/pause:3.8
registry.aliyuncs.com/google_containers/etcd:3.5.4-0
registry.aliyuncs.com/google_containers/coredns:v1.9.3

[root@master ~]# kubeadm config images pull --config k8simagespull.yml   # pull the images
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-apiserver:v1.25.0
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-controller-manager:v1.25.0
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-scheduler:v1.25.0
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-proxy:v1.25.0
[config/images] Pulled registry.aliyuncs.com/google_containers/pause:3.8
[config/images] Pulled registry.aliyuncs.com/google_containers/etcd:3.5.4-0
[config/images] Pulled registry.aliyuncs.com/google_containers/coredns:v1.9.3


# List the nodes
[root@master ~]# kubectl get nodes
NAME                 STATUS     ROLES                  AGE     VERSION
master.example.com   NotReady   control-plane,master   9m42s   v1.23.1 # "NotReady" means the node is not ready yet; setup is still running in the background


Install a pod network add-on (CNI) on the master

Flannel can be added to any existing Kubernetes cluster, although it is simplest to add it before any pods that use the pod network have started.

# kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

Warning: policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created
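To confirm the add-on came up (with this manifest the flannel DaemonSet runs in kube-system; newer manifests from the flannel-io repository use a dedicated kube-flannel namespace):

[root@master ~]# kubectl -n kube-system get pods -l app=flannel -o wide
[root@master ~]# kubectl get nodes    # nodes switch to Ready once flannel is running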

Join the Kubernetes nodes

Join node1 and node2 to the cluster, using the command saved earlier in the file init.

# Check on the master before joining
[root@master ~]# kubectl get nodes
NAME                 STATUS   ROLES                  AGE   VERSION
master.example.com   Ready    control-plane,master   23m   v1.23.1  # only one node so far

[root@master ~]# cat init  # to add a new node to the cluster, run the command printed by kubeadm init
kubeadm join 192.168.220.17:6443 --token z9bkz4.8zl0ca032qqg3qwu \
        --discovery-token-ca-cert-hash sha256:2382b876b896591aeff33c2df6bf250a28d54e9b4628839dd40ed4d98e7ac3ca


# On node1, join node1 to the cluster
[root@node1 ~]# kubeadm join 192.168.220.17:6443 --token z9bkz4.8zl0ca032qqg3qwu \
>         --discovery-token-ca-cert-hash sha256:2382b876b896591aeff33c2df6bf250a28d54e9b4628839dd40ed4d98e7ac3ca

[preflight] Running pre-flight checks
        [WARNING Service-Docker]: docker service is not enabled, please run 'systemctl enable docker.service'
        [WARNING FileExisting-tc]: tc not found in system path
        [WARNING Hostname]: hostname "node1.example.com" could not be reached
        [WARNING Hostname]: hostname "node1.example.com": lookup node1.example.com on 114.114.114.114:53: no such host
        [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.


# On node2, join node2 to the cluster
[root@node2 ~]# kubeadm join 192.168.220.17:6443 --token z9bkz4.8zl0ca032qqg3qwu \
>         --discovery-token-ca-cert-hash sha256:2382b876b896591aeff33c2df6bf250a28d54e9b4628839dd40ed4d98e7ac3ca

[preflight] Running pre-flight checks
        [WARNING Service-Docker]: docker service is not enabled, please run 'systemctl enable docker.service'
        [WARNING FileExisting-tc]: tc not found in system path
        [WARNING Hostname]: hostname "node2.example.com" could not be reached
        [WARNING Hostname]: hostname "node2.example.com": lookup node2.example.com on 114.114.114.114:53: no such host
        [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

# Allow node1 and node2 to use the kubectl command as well
[root@master ~]# scp /etc/kubernetes/admin.conf root@node1:/etc/kubernetes/
admin.conf                                                                              100% 5642     9.4MB/s   00:00    
[root@master ~]# scp /etc/kubernetes/admin.conf root@node2:/etc/kubernetes/
admin.conf
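On the nodes, kubectl also needs to be pointed at the copied kubeconfig (same pattern as on the master; run on node1 and node2):

echo 'export KUBECONFIG=/etc/kubernetes/admin.conf' > /etc/profile.d/k8s.sh
source /etc/profile.d/k8s.sh
kubectl get nodes    # should now work from the node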

Check on the master after node1 and node2 have joined the cluster

[root@master ~]# kubectl get nodes 
NAME                 STATUS   ROLES                  AGE     VERSION
master.example.com   Ready    control-plane,master   26m     v1.23.1
node1.example.com    Ready    <none>                 2m25s   v1.23.1
node2.example.com    Ready    <none>                 2m21s   v1.23.1
Test the Kubernetes cluster

Create a pod in the cluster and verify that it runs normally:

# Create an nginx Deployment using the nginx image; no node is specified, so the scheduler picks one
[root@master ~]# kubectl create deployment nginx --image=nginx
deployment.apps/nginx created

# Expose port 80 of the nginx Deployment as a NodePort service on the nodes
[root@master ~]# kubectl expose deployment nginx --port=80 --type=NodePort
service/nginx exposed

# Check
[root@master ~]# kubectl get pod,svc
NAME                         READY   STATUS    RESTARTS   AGE
pod/nginx-85b98978db-xd6wz   1/1     Running   0          68s

NAME                 TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
service/kubernetes   ClusterIP   10.96.0.1       <none>        443/TCP        30m
service/nginx        NodePort    10.99.129.159   <none>        80:31343/TCP   48s

# See which node the pod is running on
[root@master ~]# kubectl get pod -o wide
NAME                     READY   STATUS    RESTARTS   AGE   IP           NODE                NOMINATED NODE   READINESS GATES
nginx-85b98978db-xd6wz   1/1     Running   0          87s   10.244.2.2   node2.example.com   <none>           <none>    # running on node2

# Access the service's ClusterIP
[root@master ~]# curl http://10.99.129.159
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
..............

# On node2, check the mapped random port (NodePort)
[root@node2 ~]# ss -antl
State          Recv-Q         Send-Q                  Local Address:Port                    Peer Address:Port         Process         
LISTEN         0              128                         127.0.0.1:37919                        0.0.0.0:*                            
LISTEN         0              128                         127.0.0.1:10248                        0.0.0.0:*                            
LISTEN         0              128                         127.0.0.1:10249                        0.0.0.0:*                            
LISTEN         0              128                           0.0.0.0:31343                        0.0.0.0:*                  # <-- the NodePort
LISTEN         0              128                           0.0.0.0:22                           0.0.0.0:*                            
LISTEN         0              128                                 *:10250                              *:*                            
LISTEN         0              128                                 *:10256                              *:*                            
LISTEN         0              128                              [::]:22                              [::]:* 

Access node2's IP at the mapped random port (31343) in a browser.
[screenshot: the nginx welcome page]
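The same check from the command line (any node's IP works for a NodePort service; 31343 is the port allocated above):

[root@master ~]# curl http://192.168.220.21:31343    # returns the same nginx welcome page as the ClusterIP curl above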
