About Us

https://yuemake.xyz
WeChat Official Account: 悦码客
WeChat ID: ymkcode

Focus areas:
Workflow automation services
IT consulting
Online IT training

Steps

Machine preparation

Virtual machine

Create this lab's machine from the image built in the earlier lesson.

Physical machine

Install an Ubuntu 20.04 image yourself.

Network planning and static IP configuration

Single-node k8s with a static server IP.
The network uses a shared switch, i.e. the VM is bridged to the physical switch.

Server: 192.168.31.111
Home gateway: 192.168.31.1

Disk: 100 GB+
Memory: 4 GB+
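
A quick way to confirm the machine meets these minimums before proceeding (kubeadm itself also wants at least 2 CPU cores and 2 GB RAM):

# check disk, memory and CPU against the requirements above
df -h /          # root filesystem size
free -h          # total memory
nproc            # CPU count (kubeadm requires 2+)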

# stop cloud-init from regenerating the netplan config on reboot
echo 'network: {config: disabled}' > /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg

cat > /etc/netplan/50-cloud-init.yaml <<"EOF"
# This file is generated from information provided by the datasource.  Changes
# to it will not persist across an instance reboot.  To disable cloud-init's
# network configuration capabilities, write a file
# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following:
# network: {config: disabled}
network:
    ethernets:
        ens192:
            addresses: [ "192.168.31.111/24" ]
            gateway4: 192.168.31.1
            nameservers:
                addresses: [ "223.5.5.5", "223.6.6.6" ]
    version: 2
EOF

netplan apply
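
To confirm the static address took effect (assuming the interface is ens192 as above):

# verify the address, default route, and outbound connectivity
ip addr show ens192
ip route
ping -c 3 223.5.5.5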

hostnamectl set-hostname node-111

cat >> /etc/hosts << "EOF"
192.168.31.111 my-cluster-endpoint.com
EOF
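
The control-plane endpoint name should now resolve locally:

# confirm the endpoint name resolves to the server IP
getent hosts my-cluster-endpoint.com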

ssh root@192.168.31.111

Configure the containerd runtime

Install the container runtime
https://kubernetes.io/docs/setup/production-environment/container-runtimes/

k8s\deploy\config\containerd\readme.md
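
The referenced readme.md holds the actual configuration used in this course; as a rough sketch of what the linked Kubernetes container-runtimes page requires (kernel modules, sysctls, the systemd cgroup driver, and a reachable sandbox image), the steps look roughly like this:

# kernel modules and sysctls required by the container-runtimes docs
cat > /etc/modules-load.d/k8s.conf <<"EOF"
overlay
br_netfilter
EOF
modprobe overlay && modprobe br_netfilter

cat > /etc/sysctl.d/k8s.conf <<"EOF"
net.bridge.bridge-nf-call-iptables  = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward                 = 1
EOF
sysctl --system

# install containerd and generate a default config
apt-get install -y containerd
mkdir -p /etc/containerd
containerd config default > /etc/containerd/config.toml

# use the systemd cgroup driver (matches the kubelet default)
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
# point the sandbox image at a reachable mirror to avoid the
# "failed to pull image k8s.gcr.io/pause" error shown later in this doc
sed -i 's@sandbox_image = .*@sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.8"@' /etc/containerd/config.toml

systemctl restart containerd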

Initialize the k8s cluster with kubeadm

kubeadm is the cluster bootstrap and management tool for k8s.

https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/


# verify that the MAC address and product_uuid are unique for this node
ifconfig -a

cat /sys/class/dmi/id/product_uuid

# check the required port (6443 must be free before init)
nc 127.0.0.1 6443
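
The install-kubeadm page linked above also requires swap to be off; on a stock Ubuntu install:

# kubeadm refuses to run with swap enabled
swapoff -a
# keep swap off across reboots
sed -i '/\sswap\s/ s/^/#/' /etc/fstab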

# switch APT sources to a domestic mirror (Huawei Cloud)
cp -a /etc/apt/sources.list /etc/apt/sources.list.bak

sed -i "s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g" /etc/apt/sources.list
sed -i "s@http://.*security.ubuntu.com@http://repo.huaweicloud.com@g" /etc/apt/sources.list

# refresh the package index
apt update



# install kubeadm, kubelet and kubectl
apt-get update && apt-get install -y apt-transport-https

curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add -

cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb  [trusted=yes] https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF

apt-get update
apt-get install -y kubelet kubeadm kubectl
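
Optionally, pin the three packages so a routine apt upgrade cannot move them off the cluster version:

# prevent unintended upgrades of the cluster components
apt-mark hold kubelet kubeadm kubectl
kubeadm version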

#   --image-repository string              Choose a container registry to pull control plane images from (default "registry.k8s.io")
kubeadm config images  list  \
    --image-repository registry.aliyuncs.com/google_containers \
    --kubernetes-version v1.25.4
# registry.aliyuncs.com/google_containers/kube-apiserver:v1.25.4
# registry.aliyuncs.com/google_containers/kube-controller-manager:v1.25.4
# registry.aliyuncs.com/google_containers/kube-scheduler:v1.25.4
# registry.aliyuncs.com/google_containers/kube-proxy:v1.25.4
# registry.aliyuncs.com/google_containers/pause:3.8
# registry.aliyuncs.com/google_containers/etcd:3.5.5-0
# registry.aliyuncs.com/google_containers/coredns:v1.9.3

# pull
kubeadm config images  pull  \
    --image-repository registry.aliyuncs.com/google_containers \
    --kubernetes-version v1.25.4
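
The pulled images land in containerd's k8s.io namespace and can be listed there:

# confirm the control-plane images are present locally
ctr -n k8s.io images ls | grep google_containers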

# https://projectcalico.docs.tigera.io/getting-started/kubernetes/quickstart
# --pod-network-cidr: required for the Calico quickstart (link above)
# --image-repository: works around: failed to pull image "k8s.gcr.io/pause:3.5"

kubeadm init \
    --control-plane-endpoint my-cluster-endpoint.com  \
    --pod-network-cidr 10.10.0.0/16 \
    --image-repository registry.aliyuncs.com/google_containers \
    --kubernetes-version v1.25.4

# successful output:
# Your Kubernetes control-plane has initialized successfully!

# To start using your cluster, you need to run the following as a regular user:

#   mkdir -p $HOME/.kube
#   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
#   sudo chown $(id -u):$(id -g) $HOME/.kube/config

# Alternatively, if you are the root user, you can run:

#   export KUBECONFIG=/etc/kubernetes/admin.conf

# You should now deploy a pod network to the cluster.
# Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
#   https://kubernetes.io/docs/concepts/cluster-administration/addons/

# You can now join any number of control-plane nodes by copying certificate authorities
# and service account keys on each node and then running the following as root:

#   kubeadm join my-cluster-endpoint.com:6443 --token jqy60i.mdnqalijxf0fxntw \
#         --discovery-token-ca-cert-hash sha256:dcf01ed536db3dfa11d0558beed12b9505cd962eea9dc9dbd04d2517f549af92 \
#         --control-plane

# Then you can join any number of worker nodes by running the following on each as root:

# kubeadm join my-cluster-endpoint.com:6443 --token jqy60i.mdnqalijxf0fxntw \
#         --discovery-token-ca-cert-hash sha256:dcf01ed536db3dfa11d0558beed12b9505cd962eea9dc9dbd04d2517f549af92

echo 'export KUBECONFIG=/etc/kubernetes/admin.conf' >>~/.bashrc
echo 'source <(kubectl completion bash)' >>~/.bashrc
source ~/.bashrc
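
At this point kubectl should reach the API server:

# sanity-check API access
kubectl get nodes
kubectl cluster-info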

# allow workload pods on the control-plane node (required on a single-node cluster)
kubectl taint nodes --all node-role.kubernetes.io/control-plane-

# on older k8s releases the taint was named "master":
# kubectl taint nodes --all node-role.kubernetes.io/master-
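
The node should now show no taints:

# confirm the control-plane taint is gone
kubectl describe node node-111 | grep -i taints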

# list pods in all namespaces
kubectl get pod -A
# NAMESPACE     NAME                               READY   STATUS    RESTARTS   AGE
# kube-system   coredns-c676cc86f-49w4r            0/1     Pending   0          20s
# kube-system   coredns-c676cc86f-4kbfz            0/1     Pending   0          20s
# kube-system   etcd-node-111                      1/1     Running   0          36s
# kube-system   kube-apiserver-node-111            1/1     Running   1          37s

# coredns stays Pending because no network plugin is installed yet
# https://projectcalico.docs.tigera.io/getting-started/kubernetes/quickstart
# pre-pull the Tigera operator image into containerd's k8s.io namespace
ctr -n k8s.io images pull quay.io/tigera/operator:v1.28.5

# see k8s\deploy\config\calico
cd /git_proj/blogs/k8s/deploy/config/calico
kubectl create -f tigera-operator.yaml
kubectl create -f custom-resources.yaml

# configure a proxy if needed; with a good connection the calico containers are running in about 15s
watch kubectl get pods -n calico-system

# Pulling image "docker.io/calico/node:v3.24.5"
# pre-pull the calico image manually to gauge (and shorten) the pull time
ctr -n k8s.io images pull docker.io/calico/node:v3.24.5

kubectl get pod -A
# NAMESPACE          NAME                                       READY   STATUS    RESTARTS   AGE
# calico-apiserver   calico-apiserver-7697c748fc-kpn78          1/1     Running   0          63s
# calico-apiserver   calico-apiserver-7697c748fc-wx99k          1/1     Running   0          63s
# calico-system      calico-kube-controllers-6b57db7fd6-kvxwd   1/1     Running   0          10m
# calico-system      calico-node-9vp7g                          1/1     Running   0          10m
# calico-system      calico-typha-ff579c7d8-nv7fx               1/1     Running   0          10m
# kube-system        coredns-c676cc86f-49w4r                    1/1     Running   0          21m
# kube-system        coredns-c676cc86f-4kbfz                    1/1     Running   0          21m
# kube-system        etcd-node-111                              1/1     Running   0          21m
# kube-system        kube-apiserver-node-111                    1/1     Running   1          21m
# kube-system        kube-controller-manager-node-111           1/1     Running   1          21m
# kube-system        kube-proxy-ppkjw                           1/1     Running   0          21m
# kube-system        kube-scheduler-node-111                    1/1     Running   1          21m
# tigera-operator    tigera-operator-6bb5985474-xxbk6           1/1     Running   0          11m

# all components are now initialized

# verify with a sample workload, for reference:
kubectl apply -f /git_proj/blogs/nginx/yml/simple-nginx.yml
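
simple-nginx.yml lives in the course repo and is not reproduced here; if it is not at hand, an imperative equivalent (hypothetical name my-nginx) verifies the same thing:

# hypothetical quick check without the yml file
kubectl create deployment my-nginx --image=nginx:alpine
kubectl expose deployment my-nginx --port=80 --type=NodePort
kubectl get pod,svc -o wide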

ctr (containerd CLI)


ctr ns ls
# NAME   LABELS
# k8s.io

ctr -n k8s.io images ls
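
If cri-tools is installed (it is pulled in as a dependency of kubelet), crictl gives a Kubernetes-aware view of the same runtime:

# the CRI-level view of the same containers and images
crictl --runtime-endpoint unix:///run/containerd/containerd.sock ps
crictl --runtime-endpoint unix:///run/containerd/containerd.sock images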

If installation fails: reset and reinstall

Typical kubelet log symptoms (journalctl output, hostname "ubuntuguest", lines truncated):

"Error getting node" err="node \"ubuntuguest\" not found"
"Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
"Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"kube-scheduler-ubuntuguest_kube-system(56f329608896f82e67176fd6ae7b33e8)\" with CreatePodSandboxError: \"Failed to create sandbox for pod ...\""
"CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to get sandbox image \"k8s.gcr.io/pause:3.5\": failed to pull image \"k8s.gcr.io/pause:3.5\": failed to pull and unpack image ..."
"Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to get sandbox image \"k8s.gcr.io/pause:3.5\": ..."
"RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to get sandbox image \"k8s.gcr.io/pause:3.5\": ..."


# reset the node non-interactively and remove residual state
yes y | kubeadm reset
rm -rf /etc/cni/net.d
rm -rf /root/.kube/

# clear any leftover IPVS rules
apt install ipvsadm -y
ipvsadm --clear
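
After the cleanup, restart the runtime and rerun the init sequence from above:

# restart containerd, then rerun the "kubeadm init ..." command from the earlier section
systemctl restart containerd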
