Reference: https://mp.weixin.qq.com/s/7i68jmvi2eo_6wlqYEOupQ

Operating system: Ubuntu 22.04

Ubuntu 22.04 base system setup

Set the timezone

timedatectl set-timezone Asia/Shanghai

Disable the firewall

systemctl disable ufw.service
ufw disable
ufw status

Set ulimit

vim /etc/security/limits.conf

# append at the end of the file
* soft nofile 65535
* hard nofile 65535
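
After logging in again, a quick check that the new limit is active (limits.conf only applies to PAM login sessions; systemd services need LimitNOFILE set separately):

ulimit -n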

Configure /etc/hosts

vim /etc/hosts

192.168.0.11 master1
192.168.0.12 master2
192.168.0.13 master3
192.168.0.21 node1
192.168.0.22 node2
192.168.0.23 node3

Nginx proxy configuration

Proxy IP: 192.168.0.10

Install nginx

apt install curl gnupg2 ca-certificates lsb-release ubuntu-keyring
curl https://nginx.org/keys/nginx_signing.key | gpg --dearmor | sudo tee /usr/share/keyrings/nginx-archive-keyring.gpg >/dev/null
gpg --dry-run --quiet --no-keyring --import --import-options import-show /usr/share/keyrings/nginx-archive-keyring.gpg
echo "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] http://nginx.org/packages/ubuntu `lsb_release -cs` nginx" | sudo tee /etc/apt/sources.list.d/nginx.list

echo -e "Package: *\nPin: origin nginx.org\nPin: release o=nginx\nPin-Priority: 900\n" | sudo tee /etc/apt/preferences.d/99nginx

apt update
apt install nginx
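
Confirm the package was pulled from nginx.org and check the version:

apt-cache policy nginx
nginx -v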

Configure the nginx proxy
vim /etc/nginx/nginx.conf

stream {
    upstream kube-apiserver {
        server 192.168.0.11:6443     max_fails=3 fail_timeout=30s;
        #server 192.168.0.12:6443     max_fails=3 fail_timeout=30s;
        #server 192.168.0.13:6443     max_fails=3 fail_timeout=30s;
    }
    server {
        listen 6443;
        proxy_connect_timeout 2s;
        proxy_timeout 900s;
        proxy_pass kube-apiserver;
    }
}
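
The stream block must sit at the top level of nginx.conf, alongside the http block, not inside it. Validate and load the configuration, then confirm the listener:

nginx -t
systemctl enable --now nginx
ss -tlnp | grep 6443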

Set hostnames

hostnamectl set-hostname master1   # run on master1
hostnamectl set-hostname node1     # run on node1
hostnamectl set-hostname node2     # run on node2

Install ipvsadm and ipset

ipset supports Service load balancing and network policies: it provides high-performance packet filtering and forwarding and fast matching of IP addresses and ports.
ipvsadm configures and manages the IPVS load balancer that implements Service load balancing.

apt install -y ipset ipvsadm
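
These tools only come into play once kube-proxy is switched to IPVS mode (done later in this guide); at that point the virtual server table can be inspected with:

ipvsadm -Ln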

Load kernel modules

modprobe br_netfilter
modprobe overlay
modprobe ip_vs
modprobe ip_vs_rr
modprobe ip_vs_wrr
modprobe ip_vs_sh
modprobe nf_conntrack   # the legacy ip_conntrack alias no longer exists on 22.04's kernel
lsmod | grep conntrack
lsmod | grep br_netfilt
lsmod | grep overlay
lsmod | egrep "ip_vs|nf_conntrack"

Create a kernel parameter file named kubernetes.conf with the following settings

cat > /etc/sysctl.d/kubernetes.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
# net.ipv4.conf.all.send_redirects = 0
# net.ipv4.conf.default.send_redirects = 0
# net.netfilter.nf_conntrack_max = 1000000
# net.netfilter.nf_conntrack_tcp_timeout_established = 86400
# net.core.somaxconn = 1024
# net.ipv4.tcp_syncookies = 1
# net.ipv4.tcp_max_syn_backlog = 2048
# net.ipv4.tcp_synack_retries = 2
fs.file-max = 65536
vm.swappiness = 0
EOF
sysctl -p /etc/sysctl.d/kubernetes.conf

Kernel modules to load automatically at boot

cat > /etc/modules-load.d/kubernetes.conf << EOF
# /etc/modules-load.d/kubernetes.conf
br_netfilter
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
ip_tables
EOF

Note: nf_conntrack_ipv4 was merged into nf_conntrack in kernel 4.19, so on Ubuntu 22.04 (kernel 5.15) only nf_conntrack exists.

Disable swap

swapoff -a
# comment out the swap entry in /etc/fstab
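
One non-interactive way to comment out the entry (a sketch; review /etc/fstab afterwards):

sed -ri '/\sswap\s/s/^([^#])/#\1/' /etc/fstab
swapon --show   # empty output means swap is off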

Container runtime: docker-ce and cri-dockerd

Install docker-ce

apt -y install apt-transport-https ca-certificates curl software-properties-common
# apt-key is deprecated on Ubuntu 22.04; use a dedicated keyring instead
install -m 0755 -d /etc/apt/keyrings
curl -fsSL http://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] http://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list
apt update
apt install docker-ce

vim /etc/docker/daemon.json

{
  "registry-mirrors": [
    "https://ahed1oup.mirror.aliyuncs.com"
  ],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "200m"
  },
  "storage-driver": "overlay2"
}

Start docker

systemctl daemon-reload
systemctl start docker.service
systemctl enable docker.service
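
Verify that docker is using the systemd cgroup driver expected by kubelet:

docker info | grep -i 'cgroup driver'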

Install cri-dockerd

curl -LO https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.10/cri-dockerd_0.3.10.3-0.ubuntu-jammy_amd64.deb
apt install ./cri-dockerd_0.3.10.3-0.ubuntu-jammy_amd64.deb
systemctl status cri-docker.service
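
The package also installs cri-docker.socket; confirm the CRI socket exists:

systemctl is-active cri-docker.socket
ls -l /run/cri-dockerd.sock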

Install kubelet, kubeadm, and kubectl

apt-get update && apt-get install -y apt-transport-https
mkdir -p /etc/apt/keyrings
curl -fsSL https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.29/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.29/deb/ /" | tee /etc/apt/sources.list.d/kubernetes.list
apt-get update
apt-get install -y kubelet kubeadm kubectl
systemctl enable kubelet.service
# Installed versions:
kubeadm                                1.29.3-1.1
kubectl                                1.29.3-1.1
kubelet                                1.29.3-1.1
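
To keep a routine apt upgrade from skewing cluster component versions, the upstream Kubernetes docs recommend holding the packages:

apt-mark hold kubelet kubeadm kubectl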

Integrate kubelet with cri-dockerd (only needed when using cri-dockerd)

Configure cri-dockerd

vim /usr/lib/systemd/system/cri-docker.service
Modify the ExecStart line:

ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --network-plugin=cni --cni-bin-dir=/opt/cni/bin --cni-cache-dir=/var/lib/cni/cache --cni-conf-dir=/etc/cni/net.d --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9

Restart cri-docker

systemctl daemon-reload && systemctl restart cri-docker.service

Configure kubelet

On Ubuntu the kubelet package reads /etc/default/kubelet rather than /etc/sysconfig/kubelet. The --container-runtime flag was removed in kubelet 1.27, so only the CRI endpoint (with the unix:// scheme) is passed:

vim /etc/default/kubelet

KUBELET_EXTRA_ARGS="--container-runtime-endpoint=unix:///run/cri-dockerd.sock"

Initialize the first control-plane node

Pull the images

kubeadm config images list
kubeadm config images list --image-repository=registry.aliyuncs.com/google_containers

kubeadm config images pull --image-repository=registry.aliyuncs.com/google_containers --cri-socket unix:///run/cri-dockerd.sock

Download the flannel manifest

wget https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml

Images referenced in the manifest:

    image: docker.io/flannel/flannel:v0.24.4
    image: docker.io/flannel/flannel-cni-plugin:v1.4.0-flannel1
    image: docker.io/flannel/flannel:v0.24.4

Generate the kubeadm config file

kubeadm config print init-defaults > kubeadm-config.yaml

Edit the kubeadm config file
vim kubeadm-config.yaml

apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.0.11
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///run/cri-dockerd.sock
  imagePullPolicy: IfNotPresent
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
  - effect: NoSchedule
    key: node-role.kubernetes.io/control-plane
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: 192.168.0.10:6443
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.29.3
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16
scheduler: {}

Initialize the master node

kubeadm init --config kubeadm-config.yaml --upload-certs

...
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join 192.168.0.10:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:ff71ad6d9569107cf146a9a05d72ee0af13db3aa721bd8a9fe4e2b7e9b3bddb7 \
	--control-plane --certificate-key 4cbae11642fb00bd1bf97ab47384d222d501c3e3df5217e4a7472909f79dc933

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.0.10:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:ff71ad6d9569107cf146a9a05d72ee0af13db3aa721bd8a9fe4e2b7e9b3bddb7

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

Set up kubectl command completion

echo "source <(kubectl completion bash)" >> /etc/profile
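
kubectl completion relies on the bash-completion package; if completion does not work in a new shell, install it and re-source the profile:

apt install -y bash-completion
source /etc/profile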

Deploy the flannel network plugin

kubectl apply -f kube-flannel.yml
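
Recent flannel manifests deploy into the kube-flannel namespace, and the manifest's default Network (10.244.0.0/16) matches the podSubnet configured above. Wait for the DaemonSet pods to go Running before joining nodes:

kubectl -n kube-flannel get pods -o wide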

Join worker nodes to the cluster

kubeadm join 192.168.0.10:6443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:ff71ad6d9569107cf146a9a05d72ee0af13db3aa721bd8a9fe4e2b7e9b3bddb7 --cri-socket unix:///run/cri-dockerd.sock

Check the result

root@master1:~# kubectl get nodes 
NAME      STATUS   ROLES           AGE   VERSION
master1   Ready    control-plane   52m   v1.29.3
node1     Ready    <none>          49m   v1.29.3
node2     Ready    <none>          49m   v1.29.3
root@master1:~# kubectl -n kube-system get pod
NAME                              READY   STATUS    RESTARTS      AGE
coredns-857d9ff4c9-28j7b          1/1     Running   1 (18m ago)   52m
coredns-857d9ff4c9-mm892          1/1     Running   1 (18m ago)   52m
etcd-master1                      1/1     Running   1 (19m ago)   52m
kube-apiserver-master1            1/1     Running   1 (18m ago)   52m
kube-controller-manager-master1   1/1     Running   1 (19m ago)   52m
kube-proxy-5lcl5                  1/1     Running   1 (19m ago)   49m
kube-proxy-ft4lj                  1/1     Running   1 (19m ago)   52m
kube-proxy-js88l                  1/1     Running   1 (19m ago)   49m
kube-scheduler-master1            1/1     Running   1 (18m ago)   52m

Deploy an application to validate the cluster

Deploy an nginx service
vi deploy_nginx.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
  namespace: default
spec:
  replicas: 2
  selector:
    matchLabels:
      appname: nginx
  template:
    metadata:
      labels:
        appname: nginx
    spec:
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: appname
                  operator: In
                  values:
                  - nginx
              topologyKey: kubernetes.io/hostname
            weight: 50
      containers:
      - name: nginx
        image: nginx:alpine
        ports:
        - containerPort: 80
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /
            port: 80
            scheme: HTTP
          initialDelaySeconds: 10
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 1
        resources:
          requests:
            cpu: 200m
            memory: 512Mi
          limits:
            cpu: 200m
            memory: 512Mi
---
apiVersion: v1   
kind: Service
metadata:
  name: nginx
  namespace: default
spec:
  type: NodePort
  selector:
    appname: nginx
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
    nodePort: 30080

Access the service to verify it

http://192.168.0.21:30080   (any node's IP works, since the Service is exposed as a NodePort)

Create an application for testing
vi nginx-test.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-test
  namespace: test
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx-test
  template:
    metadata:
      labels:
        app: nginx-test
    spec:
      containers:
        - name: nginx-test
          image: nginx:alpine
          ports:
            - containerPort: 80
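
The manifest targets the test namespace, which does not exist by default; create it before applying:

kubectl create namespace test
kubectl apply -f nginx-test.yml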

Verify pod network connectivity

[root@master1 ~]# kubectl get pod -o wide
NAME                     READY   STATUS    RESTARTS   AGE   IP           NODE    NOMINATED NODE   READINESS GATES
nginx-7dc8c9c7c7-6rhw7   1/1     Running   0          25m   10.224.1.2   node1   <none>           <none>
nginx-7dc8c9c7c7-l8mcm   1/1     Running   0          25m   10.224.2.2   node2   <none>           <none>

[root@master1 ~]# kubectl -n test get pod -o wide
NAME                          READY   STATUS    RESTARTS   AGE   IP           NODE    NOMINATED NODE   READINESS GATES
nginx-test-67d98c7b59-4zz2n   1/1     Running   0          16m   10.224.1.3   node1   <none>           <none>

[root@master1 ~]# kubectl -n test exec -it nginx-test-67d98c7b59-4zz2n -- /bin/sh
/ # ping 10.224.2.2
PING 10.224.2.2 (10.224.2.2): 56 data bytes
64 bytes from 10.224.2.2: seq=0 ttl=62 time=1.114 ms
64 bytes from 10.224.2.2: seq=1 ttl=62 time=0.980 ms
64 bytes from 10.224.2.2: seq=2 ttl=62 time=1.808 ms
^C
--- 10.224.2.2 ping statistics ---
3 packets transmitted, 3 packets received, 0% packet loss
round-trip min/avg/max = 0.980/1.300/1.808 ms


Verify in-cluster DNS

Look up the in-cluster DNS server address

[root@master1 ~]# kubectl -n kube-system get service kube-dns -o yaml
apiVersion: v1
kind: Service
metadata:
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  creationTimestamp: "2024-03-17T02:46:37Z"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: CoreDNS
  name: kube-dns
  namespace: kube-system
  resourceVersion: "223"
  uid: 3beed986-86dc-4dab-b032-1de86366d3e7
spec:
  clusterIP: 10.96.0.10
  clusterIPs:
  - 10.96.0.10
  internalTrafficPolicy: Cluster
  ipFamilies:
  - IPv4
  ipFamilyPolicy: SingleStack
  ports:
  - name: dns
    port: 53
    protocol: UDP
    targetPort: 53
  - name: dns-tcp
    port: 53
    protocol: TCP
    targetPort: 53
  - name: metrics
    port: 9153
    protocol: TCP
    targetPort: 9153
  selector:
    k8s-app: kube-dns
  sessionAffinity: None
  type: ClusterIP
status:
  loadBalancer: {}

List Service resources

[root@master1 ~]# kubectl get service
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.96.0.1       <none>        443/TCP        3h24m
nginx        NodePort    10.109.223.79   <none>        80:30080/TCP   24m

DNS record format

SVC_NAME.NS_NAME.svc.CLUSTER_DOMAIN.
nginx.default.svc.cluster.local.

Check DNS resolution

[root@master1 ~]# dig -t A nginx.default.svc.cluster.local. @10.96.0.10

; <<>> DiG 9.16.23-RH <<>> -t A nginx.default.svc.cluster.local. @10.96.0.10
;; global options: +cmd
;; Got answer:
;; WARNING: .local is reserved for Multicast DNS
;; You are currently testing what happens when an mDNS query is leaked to DNS
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 23373
;; flags: qr aa rd; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 1
;; WARNING: recursion requested but not available

;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 4096
; COOKIE: 55472bf61b671b94 (echoed)
;; QUESTION SECTION:
;nginx.default.svc.cluster.local. IN	A

;; ANSWER SECTION:
nginx.default.svc.cluster.local. 30 IN	A	10.109.223.79

;; Query time: 0 msec
;; SERVER: 10.96.0.10#53(10.96.0.10)
;; WHEN: Sun Mar 17 14:11:15 CST 2024
;; MSG SIZE  rcvd: 119
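
The same record also resolves from inside any pod through the search domains in its /etc/resolv.conf; a throwaway busybox pod gives a quick check:

kubectl run dns-check --rm -it --image=busybox:1.36 --restart=Never -- nslookup nginx.default.svc.cluster.local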

Load balancing: ingress-nginx-controller


https://github.com/kubernetes/ingress-nginx

Version support: see the compatibility matrix in the project README.

Download the deployment manifest

wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.10.0/deploy/static/provider/cloud/deploy.yaml

Pull the images (mirrored copies in my registry)

docker pull registry.cn-beijing.aliyuncs.com/wuxingge123/ingress-nginx.controller:v1.10.0
docker pull registry.cn-beijing.aliyuncs.com/wuxingge123/ingress-nginx.kube-webhook-certgen:v1.4.0
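
The upstream manifest pins its images from registry.k8s.io by digest; one way to point it at the mirrored copies (a sketch — adjust the patterns if the digests in your copy of deploy.yaml differ):

sed -i 's#registry.k8s.io/ingress-nginx/controller:v1.10.0@sha256:[0-9a-f]*#registry.cn-beijing.aliyuncs.com/wuxingge123/ingress-nginx.controller:v1.10.0#' deploy.yaml
sed -i 's#registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.0@sha256:[0-9a-f]*#registry.cn-beijing.aliyuncs.com/wuxingge123/ingress-nginx.kube-webhook-certgen:v1.4.0#' deploy.yaml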

Changes to the yaml file:

Service changes

...
apiVersion: v1
kind: Service
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.10.0
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  externalTrafficPolicy: Local
  ipFamilies:
  - IPv4
  ipFamilyPolicy: SingleStack
  ports:
  - appProtocol: http
    name: http
    port: 80
    protocol: TCP
    targetPort: http
  - appProtocol: https
    name: https
    port: 443
    protocol: TCP
    targetPort: https
  selector:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
  type: NodePort  # changed to NodePort

Change the Deployment to a DaemonSet

apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.10.0
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  minReadySeconds: 0
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app.kubernetes.io/component: controller
      app.kubernetes.io/instance: ingress-nginx
      app.kubernetes.io/name: ingress-nginx
  template:
    metadata:
      labels:
        app.kubernetes.io/component: controller
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
        app.kubernetes.io/version: 1.10.0
    spec:
      hostNetwork: true  # added
      containers:
      - args:
        - /nginx-ingress-controller
        - --publish-service=$(POD_NAMESPACE)/ingress-nginx-controller
        - --election-id=ingress-nginx-leader
        - --controller-class=k8s.io/ingress-nginx
        - --ingress-class=nginx
        - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
        - --validating-webhook=:8443
        - --validating-webhook-certificate=/usr/local/certificates/cert
        - --validating-webhook-key=/usr/local/certificates/key
        - --enable-metrics=false
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: LD_PRELOAD
          value: /usr/local/lib/libmimalloc.so
        image: registry.cn-beijing.aliyuncs.com/wuxingge123/ingress-nginx.controller:v1.10.0
        imagePullPolicy: IfNotPresent
        lifecycle:
          preStop:
            exec:
              command:
              - /wait-shutdown
        livenessProbe:
          failureThreshold: 5
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
          initialDelaySeconds: 10
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 1
        name: controller
        ports:
        - containerPort: 80
          hostPort: 80 # added
          name: http
          protocol: TCP
        - containerPort: 443
          hostPort: 443 # added
          name: https
          protocol: TCP
        - containerPort: 8443
          hostPort: 8443 # added
          name: webhook
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
          initialDelaySeconds: 10
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 1
        resources:
          requests:
            cpu: 100m
            memory: 90Mi
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - ALL
          readOnlyRootFilesystem: false
          runAsNonRoot: true
          runAsUser: 101
          seccompProfile:
            type: RuntimeDefault
        volumeMounts:
        - mountPath: /usr/local/certificates/
          name: webhook-cert
          readOnly: true
      dnsPolicy: ClusterFirst
      nodeSelector:
        kubernetes.io/os: linux
      serviceAccountName: ingress-nginx
      terminationGracePeriodSeconds: 300
      volumes:
      - name: webhook-cert
        secret:
          secretName: ingress-nginx-admission

Test ingress-nginx

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-test
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx-test
  template:
    metadata:
      labels:
        app: nginx-test
    spec:
      containers:
        - name: nginx-test
          image: nginx:alpine
          ports:
            - containerPort: 80
---
apiVersion: v1   
kind: Service
metadata:
  name: nginx-test
spec:
  selector:
    app: nginx-test
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
---
apiVersion: networking.k8s.io/v1 
kind: Ingress
metadata:
  name: nginx-test
spec:
  ingressClassName: nginx  # the kubernetes.io/ingress.class annotation is deprecated; ingress-nginx v1.x uses ingressClassName
  rules:
    - host: mynginx.wuxingge.online
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              service:
                name: nginx-test
                port:
                  number: 80
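
Before wiring up the outer proxy, the Ingress can be exercised directly against any node with a Host header (node1's IP used as an example here):

curl -H 'Host: mynginx.wuxingge.online' http://192.168.0.21/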

Outermost proxy layer
cat mynginx.wuxingge.online.conf

server {
    listen    443 ssl;
    server_name  mynginx.wuxingge.online;
    ssl_certificate  ssl/fullchain.cer;
    ssl_certificate_key ssl/wuxingge.online.key;
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:HIGH:!aNULL:!MD5:!RC4:!DHE;
    ssl_prefer_server_ciphers on;
    ssl_session_cache shared:SSL:10m;
    ssl_session_timeout 10m;
    location / {
	    proxy_set_header Host $host;
        proxy_pass http://x.x.x.x; # k8s node IP
    }
}
server {
    listen      80;
    server_name  mynginx.wuxingge.online;
    rewrite ^(.*)$ https://${server_name}$1 permanent;
}

Load balancing: MetalLB

https://blog.csdn.net/wuxingge/article/details/139430747

Change the kube-proxy mode

Edit the kube-proxy configmap

kubectl -n kube-system edit configmaps kube-proxy
# change the mode field:
mode: "ipvs"

Restart kube-proxy

kubectl -n kube-system rollout restart daemonset kube-proxy
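
After the rollout, kube-proxy reports its active mode on its metrics endpoint (default 127.0.0.1:10249); on any node:

curl -s 127.0.0.1:10249/proxyMode   # expect: ipvs
ipvsadm -Ln | head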

Adjust the maximum number of pods per node

Check the node's current pod capacity

kubectl describe node node1 | grep -A 13 Capacity

vim /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf

Environment="KUBELET_NODE_MAX_PODS=--max-pods=250"
ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS $KUBELET_NODE_MAX_PODS

Restart the kubelet service

systemctl daemon-reload
systemctl restart kubelet
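
Confirm the new capacity was picked up:

kubectl describe node node1 | grep -i pods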