Rocky 9.3: installing Kubernetes 1.28.5 with containerd/CRI-O and the flannel/calico network plugins
Topology: one master and multiple worker nodes.
Reference: https://kubernetes.io/zh-cn/docs/home/
Environment preparation
The host is macOS 10.15.7 running VirtualBox. Each VM has two network adapters: a NAT adapter for internet access and a Host-Only adapter for connectivity with the host.
Every VM runs a Rocky 9.0 base server install. Configure the yum repositories; from inside the VMs, both the NAT and Host-Only networks can be pinged and the internet is reachable. The host can log into the VMs over SSH.
Use iTerm2 to connect to all the servers; the shortcut Command+Shift+I broadcasts the same input to every session.
1. Set the hosts entries
vim /etc/hosts
192.168.56.103 master
192.168.56.104 node1
192.168.56.105 node2
#check that name resolution works
ping master
ping node1
ping node2
2. Time synchronization
systemctl start chronyd
systemctl enable chronyd
#check that the time is in sync
date
3. Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
4. Disable SELinux
#check the current SELinux status
getenforce
#disable it permanently in the config file
vim /etc/selinux/config
SELINUX=disabled
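The config change only takes effect after a reboot; to relax enforcement for the current session as well, optionally:
#switch SELinux to permissive mode for the running system (the config file handles later boots)
setenforce 0
getenforce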
5. Disable the swap partition
vim /etc/fstab
#/dev/mapper/rl-swap none swap defaults 0 0
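Commenting out the fstab entry only keeps swap from being mounted on the next boot; to turn it off immediately as well, optionally:
#disable swap for the running system; the fstab edit keeps it off after reboot
swapoff -a
#the Swap line should now read 0
free -m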
6. Load the bridge netfilter modules
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
modprobe overlay
modprobe br_netfilter
lsmod | grep br_netfilter
#output like the following means the modules loaded successfully
br_netfilter 32768 0
bridge 303104 1 br_netfilter
7. Add the Kubernetes kernel parameters for bridge filtering and IP forwarding
# set the required sysctl parameters; they persist across reboots
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
# apply the sysctl parameters without rebooting
sudo sysctl --system
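To confirm the parameters are active, a quick check:
#each value should print as 1
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward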
8. Configure IPVS (it performs somewhat better than iptables)
#install ipset and ipvsadm
yum install ipset ipvsadm -y
#write the modules that need to be loaded into a modules-load file
cat <<EOF | sudo tee /etc/modules-load.d/ipvs.conf
overlay
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF
modprobe overlay
modprobe ip_vs && modprobe ip_vs_rr && modprobe ip_vs_wrr && modprobe ip_vs_sh && modprobe nf_conntrack
#check that they loaded
lsmod | grep -e ip_vs -e nf_conntrack
9. Install containerd
Reference: https://github.com/containerd/containerd/blob/main/docs/getting-started.md
#install containerd
tar Cxzvf /usr/local containerd-1.6.2-linux-amd64.tar.gz
#download the containerd systemd unit and enable it at boot
#https://github.com/containerd/containerd/blob/main/containerd.service into /usr/local/lib/systemd/system/containerd.service
systemctl daemon-reload
systemctl enable --now containerd
#install runc
install -m 755 runc.amd64 /usr/local/sbin/runc
#install the CNI plugins
mkdir -p /opt/cni/bin
tar Cxzvf /opt/cni/bin cni-plugins-linux-amd64-v1.1.1.tgz
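A quick sanity check that the three pieces are in place (versions correspond to the archives above):
#verify containerd, runc and the CNI plugins were installed
containerd --version
runc --version
ls /opt/cni/bin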
Contents of /usr/local/lib/systemd/system/containerd.service:
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target
[Service]
#uncomment to enable the experimental sbservice (sandboxed) version of containerd/cri integration
#Environment="ENABLE_CRI_SANDBOXES=sandboxed"
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/containerd
Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=infinity
# Comment TasksMax if your systemd version does not supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
OOMScoreAdjust=-999
[Install]
WantedBy=multi-user.target
10. Configure the systemd cgroup driver, override the sandbox image, and configure registry mirrors
#generate the default config file
mkdir /etc/containerd
/usr/local/bin/containerd config default > /etc/containerd/config.toml
#back it up
cp /etc/containerd/config.toml /etc/containerd/config.toml.default
#edit
vim /etc/containerd/config.toml
#configure the systemd cgroup driver
#to use the systemd cgroup driver with runc, set the following in /etc/containerd/config.toml:
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
...
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true
#the sandbox image can be overridden in the containerd config with:
[plugins."io.containerd.grpc.v1.cri"]
sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.2"
#configure registry mirrors; the xxx entries are your own Aliyun mirror endpoints
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
endpoint = ["https://mmj258e6.mirror.aliyuncs.com","docker.mirrors.ustc.edu.cn","https://hub-mirror.c.163.com", "https://registry-1.docker.io"]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."k8s.gcr.io"]
endpoint = ["https://k8s-gcr.xxx.com"]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"]
endpoint = ["https://gcr.xxx.com"]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."ghcr.io"]
endpoint = ["https://ghcr.xxx.com"]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"]
endpoint = ["https://quay.xxx.com"]
#restart to apply
systemctl restart containerd.service
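To confirm the edits were picked up, the merged configuration can be inspected, for example:
#the loaded config should show SystemdCgroup = true and the aliyun pause image
containerd config dump | grep -E 'SystemdCgroup|sandbox_image'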
#test: the image starts downloading, which means the runtime works
ctr images pull docker.io/library/nginx:stable
#configure crictl
cat >/etc/crictl.yaml <<EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false
EOF
#test
crictl pull nginx:stable
11. Reboot so the environment changes take effect
reboot
12. Verify the environment is in order
#selinux
getenforce
#disabled
Disabled
free -m
#swap is disabled
total used free shared buff/cache available
Mem: 1866 144 1634 5 86 1602
Swap: 0 0 0
#ip_vs kernel modules
lsmod | grep -e ip_vs -e nf_conntrack
13. Using CRI-O (an alternative to containerd)
VERSION=1.28
OS=CentOS_8
curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/devel:kubic:libcontainers:stable.repo
curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable:cri-o:$VERSION.repo https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:$VERSION/$OS/devel:kubic:libcontainers:stable:cri-o:$VERSION.repo
or if you are using a subproject release:
curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable:cri-o:${VERSION}.repo https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/${SUBVERSION}:/${VERSION}/$OS/devel:kubic:libcontainers:stable:cri-o:${SUBVERSION}:${VERSION}.repo
sudo yum install cri-o
#update
sudo yum update cri-o
sudo systemctl start crio
sudo systemctl enable crio
#config file location
vim /etc/crio/crio.conf
#this setting supports dynamic configuration reload to apply changes: systemctl reload crio
[crio.image]
pause_image="registry.k8s.io/pause:3.9"
#domestic mirror acceleration; many images cannot be pulled directly, so a proxy is recommended
sudo vim /etc/containers/registries.conf
unqualified-search-registries = ["docker.io", "registry.k8s.io", "registry.fedoraproject.org", "registry.access.redhat.com"]
[[registry]]
prefix = "docker.io"
location = "mmj258e6.mirror.aliyuncs.com"
[[registry]]
prefix = "registry.k8s.io/coredns/coredns"
location = "registry.aliyuncs.com/google_containers/coredns"
[[registry]]
prefix = "registry.k8s.io"
location = "registry.aliyuncs.com/google_containers"
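After editing crio.conf and registries.conf, restart CRI-O and, if crictl was pointed at containerd earlier, switch it to CRI-O's socket (a sketch; the socket path is CRI-O's default, the same one used in init.yaml below):
sudo systemctl restart crio
cat >/etc/crictl.yaml <<EOF
runtime-endpoint: unix:///var/run/crio/crio.sock
image-endpoint: unix:///var/run/crio/crio.sock
timeout: 10
debug: false
EOF
#test
crictl pull nginx:stable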
Installing the Kubernetes components
1. Install the Kubernetes packages
#Aliyun repository
vim /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
#official upstream repository
cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://pkgs.k8s.io/core:/stable:/v1.28/rpm/
enabled=1
gpgcheck=1
gpgkey=https://pkgs.k8s.io/core:/stable:/v1.28/rpm/repodata/repomd.xml.key
exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
EOF
#install kubectl, kubelet and kubeadm
yum install -y kubectl kubelet kubeadm
#if the packages cannot be installed, comment out the exclude line
#update
yum update -y
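A quick version check after installation (the exact patch release may vary):
#confirm all three components are installed
kubeadm version -o short
kubelet --version
kubectl version --client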
2. Configure the kubelet cgroup driver, and enable the service at boot
vim /etc/sysconfig/kubelet
KUBELET_CGROUP_ARGS="--cgroup-driver=systemd"
KUBE_PROXY_MODE="ipvs"
#enable at boot
systemctl enable kubelet
Kubernetes cluster initialization (run on the master only)
1. Command-line approach
#pre-pull the required images
sudo kubeadm config images pull
#this approach does not enable ipvs (see the config-file method below for that)
#API server advertise address: the master's IP
#image repository
#Kubernetes version
#service CIDR
#pod network CIDR
sudo kubeadm init \
--apiserver-advertise-address=192.168.56.103 \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version v1.28.5 \
--service-cidr=10.96.0.0/12 \
--pod-network-cidr=10.244.0.0/16
2. Configuration-file approach, with IPVS enabled
#export the default configuration
kubeadm config print init-defaults > init.yaml
#back it up
cp init.yaml init-defaults.yaml
#edit
vim init.yaml
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
token: abcdef.0123456789abcdef
ttl: 24h0m0s
usages:
- signing
- authentication
kind: InitConfiguration
localAPIEndpoint:
#set advertiseAddress to this machine's IP
advertiseAddress: 192.168.56.103
bindPort: 6443
nodeRegistration:
#pick the socket matching your container runtime
#criSocket: unix:///var/run/containerd/containerd.sock
criSocket: unix:///var/run/crio/crio.sock
imagePullPolicy: IfNotPresent
#a name of your choosing
name: master
taints: null
---
apiServer:
timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
local:
dataDir: /var/lib/etcd
#imageRepository: the image registry; if it was already configured in CRI-O there is no need to change it here
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.28.5
networking:
dnsDomain: cluster.local
#networking.podSubnet: the pod network
podSubnet: 10.244.0.0/16
#networking.serviceSubnet: the service network
serviceSubnet: 10.96.0.0/12
scheduler: {}
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
#use the systemd cgroup driver
cgroupDriver: systemd
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
#use ipvs as the kube-proxy mode
mode: ipvs
Changes made:
- advertiseAddress set to this machine's IP
- imageRepository set to the mirror registry
- nodeRegistration.name set to a name of your choosing
- networking.podSubnet: the pod network
- networking.serviceSubnet: the service network
- kube-proxy mode set to ipvs
- kubelet cgroup driver set to systemd
Validate the file:
kubeadm init phase preflight --config=init.yaml
3. Create the cluster
sudo kubeadm init --config=init.yaml
#recovering from a failed init
sudo kubeadm reset
#after kubeadm reset, thoroughly clean up the previous initialization
sudo iptables -F && sudo iptables -t nat -F && sudo iptables -t mangle -F && sudo iptables -X
sudo systemctl stop kubelet
sudo ipvsadm --clear
sudo rm -rf /var/lib/cni/* && \
sudo rm -rf /var/lib/kubelet/ && \
sudo rm -rf /etc/cni/
sudo ifconfig cni0 down
#if it exists
sudo ifconfig flannel.1 down
sudo ip link delete cni0
#if it exists
sudo ip link delete flannel.1
#after that, kubeadm init can be run again
#for verbose logging, run
sudo kubeadm init --config=init.yaml --v=6
#output like the following means success
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.56.103:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:0b136488dc5c9ce642f17a8b7d715f83edbd63671f904472cb066654b500f596
4. Configure kubeconfig as instructed so that kubectl can see the cluster
#regular user
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
#root user
vim /etc/profile.d/sh.local
export KUBECONFIG=/etc/kubernetes/admin.conf
source /etc/profile
echo $KUBECONFIG
/etc/kubernetes/admin.conf
#save the join command
vim join-node
kubeadm join 192.168.56.103:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:0b136488dc5c9ce642f17a8b7d715f83edbd63671f904472cb066654b500f596
#if the token expires or is lost
#list tokens
kubeadm token list
#create a token
kubeadm token create
#generate the sha256 hash of the CA certificate
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
#or create the token and print the full join command in one step
kubeadm token create --print-join-command
5. Check the nodes and join the workers
kubectl get nodes
#if you see the following,
The connection to the server localhost:8080 was refused - did you specify the right host or port?
source /etc/profile
kubectl get nodes
NAME STATUS ROLES AGE VERSION
master NotReady control-plane 59m v1.28.5
#check the pods
kubectl get pods -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-c676cc86f-25848 0/1 ContainerCreating 0 68s
kube-system coredns-c676cc86f-bq2r6 0/1 ContainerCreating 0 68s
kube-system etcd-master 1/1 Running 1 83s
kube-system kube-apiserver-master 1/1 Running 1 84s
kube-system kube-controller-manager-master 1/1 Running 0 83s
kube-system kube-proxy-fclpf 1/1 Running 0 68s
kube-system kube-scheduler-master 1/1 Running 1 83s
#join the other nodes: run this as root on each worker
kubeadm join 192.168.56.103:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:0b136488dc5c9ce642f17a8b7d715f83edbd63671f904472cb066654b500f596
#if a node joined this master before and rejoining fails with
#GET https://192.168.56.103:6443/api/v1/nodes/node1?timeout=10s 401 Unauthorized in 1 milliseconds
#run kubeadm reset on that node and then join again
#check again
kubectl get nodes
NAME STATUS ROLES AGE VERSION
master NotReady control-plane 63m v1.28.5
node1 NotReady <none> 12s v1.28.5
6. Join an additional control-plane node
#on the master
sudo kubeadm init phase upload-certs --upload-certs
sudo kubeadm token create --print-join-command
#use the output of the second command on the new control-plane node
kubeadm join 172.16.0.1:6443 --token xxxxxxxxx --discovery-token-ca-cert-hash xxxxxxx --control-plane --certificate-key xxxxxxx
On success it prints the same output as the original control-plane initialization.
Installing a network plugin
Using flannel (only needs to be done on the master)
https://github.com/flannel-io/flannel#deploying-flannel-manually
Either download the manifest:
wget https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml
or apply it directly:
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
The kube-flannel.yml file is shown below (version v0.19.2):
---
kind: Namespace
apiVersion: v1
metadata:
name: kube-flannel
labels:
pod-security.kubernetes.io/enforce: privileged
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-flannel
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-flannel
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-flannel
labels:
tier: node
app: flannel
data:
cni-conf.json: |
{
"name": "cbr0",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
net-conf.json: |
{
"Network": "10.244.0.0/16",
"Backend": {
"Type": "vxlan"
}
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds
namespace: kube-flannel
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
hostNetwork: true
priorityClassName: system-node-critical
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni-plugin
#image: flannelcni/flannel-cni-plugin:v1.1.0 for ppc64le and mips64le (dockerhub limitations may apply)
image: docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
command:
- cp
args:
- -f
- /flannel
- /opt/cni/bin/flannel
volumeMounts:
- name: cni-plugin
mountPath: /opt/cni/bin
- name: install-cni
#image: flannelcni/flannel:v0.19.2 for ppc64le and mips64le (dockerhub limitations may apply)
image: docker.io/rancher/mirrored-flannelcni-flannel:v0.19.2
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
#image: flannelcni/flannel:v0.19.2 for ppc64le and mips64le (dockerhub limitations may apply)
image: docker.io/rancher/mirrored-flannelcni-flannel:v0.19.2
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN", "NET_RAW"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: EVENT_QUEUE_DEPTH
value: "5000"
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
- name: xtables-lock
mountPath: /run/xtables.lock
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni-plugin
hostPath:
path: /opt/cni/bin
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
Check whether net-conf.json needs changing: its Network value must match the pod CIDR set as networking.podSubnet in init.yaml.
net-conf.json: |
{
"Network": "10.244.0.0/16",
"Backend": {
"Type": "vxlan"
}
}
Replace docker.io with a domestic mirror, or use a proxy; one option is sketched below.
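One way to rewrite the image references in place (a sketch, assuming the NetEase mirror used later in this article; any reachable docker.io mirror works):
#point the flannel images at a domestic mirror of docker.io
sed -i 's#docker.io/rancher#hub-mirror.c.163.com/rancher#g' kube-flannel.yml
#confirm the substitution
grep 'image:' kube-flannel.yml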
kubectl apply -f kube-flannel.yml
#output:
namespace/kube-flannel created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created
#check the nodes: they have gone from NotReady to Ready
kubectl get nodes
NAME STATUS ROLES AGE VERSION
master Ready control-plane 114m v1.28.5
node1 Ready <none> 51m v1.28.5
node2 Ready <none> 50m v1.28.5
#check the pods
kubectl get pods -A
NAMESPACE NAME READY STATUS RESTARTS AGE
default nginx-765dd7bc86-r624c 0/1 ContainerCreating 0 33m
kube-flannel kube-flannel-ds-jjlr6 0/1 CrashLoopBackOff 14 (13s ago) 55m
kube-flannel kube-flannel-ds-qs5gw 0/1 Error 14 (5m49s ago) 55m
kube-flannel kube-flannel-ds-wvb2f 0/1 CrashLoopBackOff 15 (2m18s ago) 55m
kube-system coredns-c676cc86f-p4cxj 0/1 ContainerCreating 0 168m
kube-system coredns-c676cc86f-r82tx 0/1 ContainerCreating 0 168m
kube-system etcd-master 1/1 Running 0 168m
kube-system kube-apiserver-master 1/1 Running 0 168m
kube-system kube-controller-manager-master 1/1 Running 0 168m
kube-system kube-proxy-6smd5 1/1 Running 0 105m
kube-system kube-proxy-tkl9v 1/1 Running 0 168m
kube-system kube-proxy-vwj7r 1/1 Running 0 104m
kube-system kube-scheduler-master 1/1 Running 0 168m
#find out why a pod did not come up
kubectl logs kube-flannel-ds-wvb2f -n kube-flannel
If there are multiple NICs and flannel fails because it cannot find the right one, specify an interface:
containers:
- name: kube-flannel
  #image: flannelcni/flannel:v0.19.2 for ppc64le and mips64le (dockerhub limitations may apply)
  image: hub-mirror.c.163.com/rancher/mirrored-flannelcni-flannel:v0.19.2 #switched to the NetEase Docker mirror
  command:
  - /opt/bin/flanneld
  args:
  - --ip-masq
  - --kube-subnet-mgr
  #- --iface=enp0s8  #specify the local interface
If you hit the error Error registering network: failed to acquire lease: node "spinq-master" pod cidr not assigned,
check that the pod CIDR (networking.podSubnet in init.yaml) matches the Network value in net-conf.json; one way to check is sketched below.
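One way to confirm that each node actually has a pod CIDR assigned (each node should show a /24 carved out of 10.244.0.0/16):
kubectl get nodes -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.podCIDR}{"\n"}{end}'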
After fixing the issues, everything comes up normally:
kubectl get pods -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-flannel kube-flannel-ds-rmhqh 1/1 Running 0 12m
kube-system coredns-c676cc86f-25848 1/1 Running 0 19m
kube-system coredns-c676cc86f-bq2r6 1/1 Running 0 19m
kube-system etcd-master 1/1 Running 1 19m
kube-system kube-apiserver-master 1/1 Running 1 19m
kube-system kube-controller-manager-master 1/1 Running 0 19m
kube-system kube-proxy-fclpf 1/1 Running 0 19m
kube-system kube-scheduler-master 1/1 Running 1 19m
Using calico
Website: https://www.tigera.io/project-calico/
Download https://raw.githubusercontent.com/projectcalico/calico/v3.26.1/manifests/calico.yaml
Change CALICO_IPV4POOL_CIDR to the pod CIDR specified at kubeadm init time, 10.244.0.0/16.
If it is left commented out, no change is needed; an uncommented example follows the snippet below.
vim calico.yaml
# The default IPv4 pool to create on startup if none exists. Pod IPs will be
# chosen from this range. Changing this value after installation will have
# no effect. This should fall within `--cluster-cidr`.
# - name: CALICO_IPV4POOL_CIDR
# value: "192.168.0.0/16"
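If you do set it, the uncommented form for the 10.244.0.0/16 pod network used in this article would look like this (a sketch; the indentation must match the surrounding env entries in calico.yaml):
- name: CALICO_IPV4POOL_CIDR
  value: "10.244.0.0/16"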
Pre-pull the required images:
grep image calico.yaml
image: docker.io/calico/cni:v3.26.1
imagePullPolicy: IfNotPresent
image: docker.io/calico/cni:v3.26.1
imagePullPolicy: IfNotPresent
image: docker.io/calico/node:v3.26.1
imagePullPolicy: IfNotPresent
image: docker.io/calico/node:v3.26.1
imagePullPolicy: IfNotPresent
image: docker.io/calico/kube-controllers:v3.26.1
imagePullPolicy: IfNotPresent
sudo crictl pull docker.io/calico/cni:v3.26.1
sudo crictl pull docker.io/calico/node:v3.26.1
sudo crictl pull docker.io/calico/kube-controllers:v3.26.1
#install the network plugin (no sudo needed)
kubectl apply -f calico.yaml
Check the installation:
kubectl get pods -n kube-system
If it fails, it can be rolled back:
kubectl delete -f calico.yaml
If STATUS shows ImagePullBackOff or Init:ImagePullBackOff, pull the images manually:
sudo crictl pull docker.io/calico/cni:v3.26.1
sudo crictl pull docker.io/calico/node:v3.26.1
sudo crictl pull docker.io/calico/kube-controllers:v3.26.1
Inspect a pod in detail:
kubectl describe pods <pod-name> -n kube-system
#check the nodes: they have gone from NotReady to Ready
kubectl get nodes
NAME STATUS ROLES AGE VERSION
master Ready control-plane 114m v1.28.5
node1 Ready <none> 51m v1.28.5
node2 Ready <none> 50m v1.28.5
Common commands
Format: kubectl an_action a_resource a_resource_name -flags
an_action is an action such as get, create or describe; a_resource is a resource type such as deployment.
1. kubectl cluster-info - view cluster information
2. kubectl get componentstatuses - view the status of the master components
3. kubectl get nodes --show-labels - view cluster node information
4. kubectl describe node 10-0-0-171.node - describe a particular node
5. kubectl get pods -o wide --all-namespaces - list all pods
6. kubectl get svc --all-namespaces -o wide - list all services (a service is the gateway/entry point to its pods)
7. kubectl get deployments --all-namespaces - list all deployments
8. kubectl get ingresses --all-namespaces - list all ingresses
9. kubectl get secret --all-namespaces - list all secrets
10. kubectl get pv - list all persistent volumes
11. kubectl get pvc --all-namespaces - list all persistent volume claims
12. kubectl get job --all-namespaces -o wide - list all jobs
13. kubectl get pod weave-net-xhh0h --namespace=kube-system -o yaml - show a pod's YAML
14. kubectl describe pod calico-etcd --namespace=kube-system - describe a pod's state
15. kubectl describe svc kube-dns --namespace=kube-system - describe a service's state
16. kubectl describe deployments sitealive --namespace=cluster2016 - describe a deployment's state
17. kubectl delete pod sitealive-3553544168-q4gtx --namespace=wmcluster2016 - delete a pod
18. kubectl logs proxycrawler-2347617609-mwzts --namespace=wmcluster2016 - view a pod's logs
19. kubectl exec -it busybox -- sh - open a shell inside a pod's container
20. kubectl run my-nginx --image=nginx --replicas=2 --port=80 - create two nginx pods listening on port 80
21. kubectl delete deployment my-nginx - delete the deployment and stop its containers
22. kubectl edit pod/nginx -n default - edit a resource's YAML in place
23. kubectl api-versions - list the available API versions
24. kubectl -n kubernetes-dashboard patch svc kubernetes-dashboard -p '{"spec":{"type":"NodePort"}}' - hot-patch a resource's configuration
25. kubectl scale --replicas=2 deployment/my-nginx - scale the deployment to 2 pods
26. kubectl create deployment my-nginx --image=nginx --port=80 --replicas=3 - create an nginx deployment that keeps 3 pods listening on port 80
27. kubectl apply -f tomcat-deploy.yml - apply a YAML manifest
28. kubectl get pod -o wide - show more detail for pods
29. kubectl get pod -o yaml - show pod details in YAML format
30. kubectl delete pod tomcat --force --grace-period=0 - force-delete a pod stuck in Terminating (common with NFS-mounted pods)
31. kubectl get deploy/nginx-deployment --export -o yaml > my-deploy.yaml - export an existing resource to a YAML file
32. kubectl delete pod pod-name -n test --force --grace-period=0 - delete the pod named pod-name in the test namespace
Testing
#deploy; with containerd, image names may need the full registry path or they will not be found
kubectl create deployment nginx --image=nginx:stable
#expose the port
kubectl expose deployment nginx --port=80 --type=NodePort
#check the service status
kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-5dbc74ff5-clffg 1/1 Running 0 101s
kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 57m
nginx NodePort 10.98.56.133 <none> 80:31124/TCP 40s
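The service should now be reachable on any node IP at the NodePort shown above (31124 in this run; the port is assigned randomly unless specified):
#access nginx through the NodePort
curl http://192.168.56.104:31124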
This completes the Rocky 9.3 installation of Kubernetes 1.28.5 with containerd/CRI-O.