Deploying a Kubernetes 1.29.x High-Availability Cluster on Rocky 9.2
Reference:
https://mp.weixin.qq.com/s/6P-V87Wgn3BxmG1guP1AyA
nginx proxy configuration
Proxy IP: 192.168.0.10
vim /etc/nginx/nginx.conf
stream {
    upstream kube-apiserver {
        server 192.168.0.11:6443 max_fails=3 fail_timeout=30s;
        #server 192.168.0.12:6443 max_fails=3 fail_timeout=30s;
        #server 192.168.0.13:6443 max_fails=3 fail_timeout=30s;
    }
    server {
        listen 6443;
        proxy_connect_timeout 2s;
        proxy_timeout 900s;
        proxy_pass kube-apiserver;
    }
}
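On Rocky 9 the nginx stream module ships as a separate package (nginx-mod-stream in AppStream; an assumption worth verifying on your mirror), and without it nginx -t rejects the stream block. A minimal sketch to install, validate, and activate the proxy:
# Install nginx plus the stream (TCP proxy) module, then validate the
# configuration before enabling and (re)loading the service.
yum install -y nginx nginx-mod-stream
nginx -t
systemctl enable --now nginx
systemctl reload nginx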
Set the hostname (run the matching command on each node)
hostnamectl set-hostname master1
hostnamectl set-hostname node1
hostnamectl set-hostname node2
Configure hosts (on every node)
vim /etc/hosts
192.168.0.11 master1
192.168.0.21 node1
192.168.0.22 node2
Raise file-descriptor limits
vim /etc/security/limits.conf
...
* soft nofile 65535
* hard nofile 65535
* soft nproc 65535
* hard nproc 65535
Tune kernel parameters and load kernel modules
vim /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
modprobe br_netfilter
lsmod | grep netfilter
modprobe nf_conntrack
lsmod | grep conntrack
sysctl -p /etc/sysctl.d/k8s.conf
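One prerequisite this walkthrough does not show: kubelet refuses to start while swap is enabled. Assuming the nodes were installed with swap (the Rocky default), disable it on every node:
# Turn swap off immediately and comment out swap entries in /etc/fstab
# so it stays off after a reboot.
swapoff -a
sed -i '/\bswap\b/s/^[^#]/#&/' /etc/fstab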
Install dependencies
yum install wget jq psmisc vim net-tools nfs-utils socat telnet device-mapper-persistent-data lvm2 git tar zip curl conntrack ipvsadm ipset iptables sysstat libseccomp
Enable IPVS forwarding
mkdir /etc/sysconfig/modules
vim /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
lsmod |egrep "ip_vs|nf_conntrack"
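Note that /etc/sysconfig/modules/ is a legacy RHEL mechanism and is not executed automatically on Rocky 9, so the script above only runs when invoked by hand. To make the IPVS modules load at boot, the systemd-modules-load mechanism can be used instead (a sketch):
# Rocky 9 loads modules listed in /etc/modules-load.d/ at boot via
# systemd-modules-load.service.
cat > /etc/modules-load.d/ipvs.conf <<'EOF'
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF
systemctl restart systemd-modules-load.service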
Install containerd
vim /etc/modules-load.d/containerd.conf
overlay
br_netfilter
modprobe overlay
modprobe br_netfilter
vim /etc/yum.repos.d/docker-ce.repo
[docker-ce-stable]
name=Docker CE Stable - $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/9/$basearch/stable
enabled=1
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
yum install containerd.io
containerd config default > /etc/containerd/config.toml
vim /etc/containerd/config.toml
Modify:
...
SystemdCgroup = true
...
sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9"
...
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
  [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
    endpoint = ["https://p4oudlho.mirror.aliyuncs.com"]
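The first two edits lend themselves to scripting; a minimal sketch, assuming the stock layout emitted by `containerd config default` on containerd 1.6/1.7 (the registry mirror table still has to be added by hand):
# Flip runc to the systemd cgroup driver (kubelet is configured for
# systemd as well, and the two must agree).
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
# Point the sandbox (pause) image at the Aliyun mirror.
sed -i 's#sandbox_image = ".*"#sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9"#' /etc/containerd/config.toml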
systemctl start containerd.service
systemctl enable containerd.service
Install Kubernetes 1.29.x
vim /etc/yum.repos.d/k8s.repo
[kubernetes]
name=kubernetes
baseurl=https://pkgs.k8s.io/core:/stable:/v1.29/rpm/
enabled=1
gpgcheck=0
yum list kubelet --showduplicates | sort -r | grep "1.29"
yum install kubelet-1.29.3 kubectl-1.29.3 kubeadm-1.29.3
Configure kubelet
vim /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"
systemctl enable kubelet
Pull the control-plane images
kubeadm config images list --kubernetes-version=v1.29.3
kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers --kubernetes-version=v1.29.3
Download the flannel deployment manifest
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
or, if the default pod CIDR is acceptable, apply it straight from the latest release:
kubectl apply -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
ctr -n k8s.io image pull docker.io/flannel/flannel:v0.24.3
ctr -n k8s.io image pull docker.io/flannel/flannel-cni-plugin:v1.4.0-flannel1
ctr -n k8s.io image ls
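The ctr pulls go into containerd's k8s.io namespace and must be run on every node that will host flannel pods. Assuming passwordless root SSH to the nodes (host names as defined in /etc/hosts above), a small loop covers them all:
# Pre-pull the flannel images on each node so the DaemonSet starts
# without reaching Docker Hub at schedule time.
for host in master1 node1 node2; do
    ssh "$host" ctr -n k8s.io image pull docker.io/flannel/flannel:v0.24.3
    ssh "$host" ctr -n k8s.io image pull docker.io/flannel/flannel-cni-plugin:v1.4.0-flannel1
done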
Initialize the k8s cluster
Generate the kubeadm config file
kubeadm config print init-defaults > kubeadm-config.yaml
Edit the kubeadm config file
vim kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.0.11
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/control-plane
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: 192.168.0.10:6443
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.29.3
networking:
  dnsDomain: cluster.local
  podSubnet: 10.224.0.0/16
  serviceSubnet: 10.96.0.0/12
scheduler: {}
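Before running init, the edited file can be sanity-checked; `kubeadm config validate` is available in kubeadm 1.26 and later, and a full dry run is also possible:
# Static validation of the config file against the kubeadm API schema.
kubeadm config validate --config kubeadm-config.yaml
# Rehearse the whole initialization without changing the host.
kubeadm init --config kubeadm-config.yaml --dry-run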
Initialize the master node
kubeadm init --config kubeadm-config.yaml --upload-certs
...
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of the control-plane node running the following command on each as root:
kubeadm join 192.168.0.10:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:2f8cdee0d37f2be0952df50fe8cf44f74db8ddca8f586a6fc9038d1a83696aa0 \
--control-plane --certificate-key 122741eac5f35ef64361fd398e402f84282f00f5f2f33630523eab02e15ac0d7
Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.0.10:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:2f8cdee0d37f2be0952df50fe8cf44f74db8ddca8f586a6fc9038d1a83696aa0
Enable kubectl command completion
echo "source <(kubectl completion bash)" >> /etc/profile
Deploy the flannel network plugin
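Note: the stock kube-flannel.yml sets Network to 10.244.0.0/16 in its net-conf.json, while podSubnet above was set to 10.224.0.0/16. Flannel only routes pod traffic correctly when the two agree, so edit the manifest first (the fragment below shows the relevant ConfigMap section with the corrected value):
vim kube-flannel.yml
  net-conf.json: |
    {
      "Network": "10.224.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }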
kubectl apply -f kube-flannel.yml
Join the worker nodes to the cluster
kubeadm join 192.168.0.10:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:2f8cdee0d37f2be0952df50fe8cf44f74db8ddca8f586a6fc9038d1a83696aa0
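If the bootstrap token has expired (the default TTL is 24h), a fresh join command can be generated on the master:
# Prints a complete `kubeadm join` command with a newly created token
# and the current CA certificate hash.
kubeadm token create --print-join-command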
Check the result
[root@master1 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master1 Ready control-plane 26m v1.29.3
node1 Ready <none> 23m v1.29.3
node2 Ready <none> 22m v1.29.3
[root@master1 ~]# kubectl -n kube-system get pod
NAME READY STATUS RESTARTS AGE
coredns-857d9ff4c9-6cxgq 1/1 Running 1 (16m ago) 26m
coredns-857d9ff4c9-rngq6 1/1 Running 1 (16m ago) 26m
etcd-master1 1/1 Running 1 (16m ago) 26m
kube-apiserver-master1 1/1 Running 1 (16m ago) 26m
kube-controller-manager-master1 1/1 Running 1 (16m ago) 26m
kube-proxy-c8d6g 1/1 Running 1 (16m ago) 23m
kube-proxy-jfmnt 1/1 Running 1 (16m ago) 26m
kube-proxy-s5s58 1/1 Running 1 (16m ago) 23m
kube-scheduler-master1 1/1 Running 1 (16m ago) 26m
Deploy an application to validate the cluster
Deploy an nginx service
vi deploy_nginx.yml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
  namespace: default
spec:
  replicas: 2
  selector:
    matchLabels:
      appname: nginx
  template:
    metadata:
      labels:
        appname: nginx
    spec:
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: appname
                  operator: In
                  values:
                  - nginx
              topologyKey: kubernetes.io/hostname
            weight: 50
      containers:
      - name: nginx
        image: nginx:alpine
        ports:
        - containerPort: 80
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /
            port: 80
            scheme: HTTP
          initialDelaySeconds: 10
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 1
        resources:
          requests:
            cpu: 200m
            memory: 512Mi
          limits:
            cpu: 200m
            memory: 512Mi
---
apiVersion: v1
kind: Service
metadata:
  name: nginx
  namespace: default
spec:
  type: NodePort
  selector:
    appname: nginx
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
    nodePort: 30080
kubectl apply -f deploy_nginx.yml
Verify access to the service
http://192.168.0.21:30080
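From any machine that can reach the nodes, a quick header check confirms nginx is answering; since a NodePort service listens on every node, either node IP works:
# Fetch only the response headers from the NodePort on each node.
curl -I http://192.168.0.21:30080
curl -I http://192.168.0.22:30080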
Create a test application
vi nginx-test.yml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-test
  namespace: test
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx-test
  template:
    metadata:
      labels:
        app: nginx-test
    spec:
      containers:
      - name: nginx-test
        image: nginx:alpine
        ports:
        - containerPort: 80
The test namespace must exist before the manifest can be applied:
kubectl create namespace test
kubectl apply -f nginx-test.yml
Verify pod-network connectivity
[root@master1 ~]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-7dc8c9c7c7-6rhw7 1/1 Running 0 25m 10.224.1.2 node1 <none> <none>
nginx-7dc8c9c7c7-l8mcm 1/1 Running 0 25m 10.224.2.2 node2 <none> <none>
[root@master1 ~]# kubectl -n test get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-test-67d98c7b59-4zz2n 1/1 Running 0 16m 10.224.1.3 node1 <none> <none>
[root@master1 ~]# kubectl -n test exec -it nginx-test-67d98c7b59-4zz2n -- /bin/sh
/ # ping 10.224.2.2
PING 10.224.2.2 (10.224.2.2): 56 data bytes
64 bytes from 10.224.2.2: seq=0 ttl=62 time=1.114 ms
64 bytes from 10.224.2.2: seq=1 ttl=62 time=0.980 ms
64 bytes from 10.224.2.2: seq=2 ttl=62 time=1.808 ms
^C
--- 10.224.2.2 ping statistics ---
3 packets transmitted, 3 packets received, 0% packet loss
round-trip min/avg/max = 0.980/1.300/1.808 ms
Verify in-cluster DNS
Find the cluster DNS service address
[root@master1 ~]# kubectl -n kube-system get service kube-dns -o yaml
apiVersion: v1
kind: Service
metadata:
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  creationTimestamp: "2024-03-17T02:46:37Z"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: CoreDNS
  name: kube-dns
  namespace: kube-system
  resourceVersion: "223"
  uid: 3beed986-86dc-4dab-b032-1de86366d3e7
spec:
  clusterIP: 10.96.0.10
  clusterIPs:
  - 10.96.0.10
  internalTrafficPolicy: Cluster
  ipFamilies:
  - IPv4
  ipFamilyPolicy: SingleStack
  ports:
  - name: dns
    port: 53
    protocol: UDP
    targetPort: 53
  - name: dns-tcp
    port: 53
    protocol: TCP
    targetPort: 53
  - name: metrics
    port: 9153
    protocol: TCP
    targetPort: 9153
  selector:
    k8s-app: kube-dns
  sessionAffinity: None
  type: ClusterIP
status:
  loadBalancer: {}
List service resources
[root@master1 ~]# kubectl get service
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 3h24m
nginx NodePort 10.109.223.79 <none> 80:30080/TCP 24m
DNS record format
SVC_NAME.NS_NAME.svc.CLUSTER_DOMAIN
nginx.default.svc.cluster.local.
Check DNS resolution
[root@master1 ~]# dig -t A nginx.default.svc.cluster.local. @10.96.0.10
; <<>> DiG 9.16.23-RH <<>> -t A nginx.default.svc.cluster.local. @10.96.0.10
;; global options: +cmd
;; Got answer:
;; WARNING: .local is reserved for Multicast DNS
;; You are currently testing what happens when an mDNS query is leaked to DNS
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 23373
;; flags: qr aa rd; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 1
;; WARNING: recursion requested but not available
;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 4096
; COOKIE: 55472bf61b671b94 (echoed)
;; QUESTION SECTION:
;nginx.default.svc.cluster.local. IN A
;; ANSWER SECTION:
nginx.default.svc.cluster.local. 30 IN A 10.109.223.79
;; Query time: 0 msec
;; SERVER: 10.96.0.10#53(10.96.0.10)
;; WHEN: Sun Mar 17 14:11:15 CST 2024
;; MSG SIZE rcvd: 119
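dig from the host only proves that the CoreDNS service answers; resolution from inside a pod (which goes through the pod's /etc/resolv.conf and search domains) can be checked with a throwaway busybox pod, for example:
# busybox:1.28 is commonly used here because its nslookup handles
# cluster DNS search domains reliably; --rm removes the pod on exit.
kubectl run dns-test --rm -it --image=busybox:1.28 --restart=Never \
    -- nslookup nginx.default.svc.cluster.local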