k8s 部署ingress-nginx v1.1.1(完整版)
部署ingress nginx controller1.0 获取ingress controller部署yaml文件:[root@k8s-master test]# wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/cloud/deploy.yaml
部署ingress
一、yaml方式部署:
1.0 获取ingress controller部署yaml文件:
[root@k8s-master test]# wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/cloud/deploy.yaml
[root@k8s-master ingress]#
[root@k8s-master ingress]#
1.1下载ingress controller镜像:
# Pull the ingress-nginx images from a mirror reachable inside China
# (k8s.gcr.io is not directly reachable from many networks).
docker pull liangjw/kube-webhook-certgen:v1.1.1
docker pull liangjw/ingress-nginx-controller:v1.1.1
# Re-tag them to the exact names referenced by deploy.yaml.
docker tag liangjw/kube-webhook-certgen:v1.1.1 k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.1.1
docker tag liangjw/ingress-nginx-controller:v1.1.1 k8s.gcr.io/ingress-nginx/controller:v1.1.1
# Remove the mirror tags. NOTE: "docker image delete" is not a valid
# subcommand — the correct one is "docker image rm" (alias: docker rmi).
docker image rm liangjw/kube-webhook-certgen:v1.1.1
docker image rm liangjw/ingress-nginx-controller:v1.1.1
由于一些原因,不能下载到image,因此用国内镜像:
1.2 修改deploy.yaml文件,去掉image的'@sha256:...'摘要后缀(因为重新tag的本地镜像摘要与官方不一致,带摘要会导致kubelet仍去k8s.gcr.io拉取):
image: k8s.gcr.io/ingress-nginx/controller:v1.1.1 #@sha256:0bc88eb15f9e7f84e8e56c14fa5735aaa488b840983f87bd79b1054190e660de
替换为:
image: k8s.gcr.io/ingress-nginx/controller:v1.1.1
image: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.1.1 #@sha256:64d8c73dca984af206adf9d6d7e46aa550362b1d7a01f3a0a91b20cc67868660
替换为:
image: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.1.1
1.3 创建ingress controller deploy
[root@k8s-master ingress]# kubectl apply -f deploy.yaml
namespace/ingress-nginx created
serviceaccount/ingress-nginx created
configmap/ingress-nginx-controller created
clusterrole.rbac.authorization.k8s.io/ingress-nginx created
clusterrolebinding.rbac.authorization.k8s.io/ingress-nginx created
role.rbac.authorization.k8s.io/ingress-nginx created
rolebinding.rbac.authorization.k8s.io/ingress-nginx created
service/ingress-nginx-controller-admission created
service/ingress-nginx-controller created
deployment.apps/ingress-nginx-controller created
ingressclass.networking.k8s.io/nginx created
validatingwebhookconfiguration.admissionregistration.k8s.io/ingress-nginx-admission created
serviceaccount/ingress-nginx-admission created
clusterrole.rbac.authorization.k8s.io/ingress-nginx-admission created
clusterrolebinding.rbac.authorization.k8s.io/ingress-nginx-admission created
role.rbac.authorization.k8s.io/ingress-nginx-admission created
rolebinding.rbac.authorization.k8s.io/ingress-nginx-admission created
job.batch/ingress-nginx-admission-create created
job.batch/ingress-nginx-admission-patch created
[root@k8s-master ingress]#
k8s中大多使用nginx-ingress-controller来实现ingress, 但是脆弱的nginx-controller通过ingress解析出nginx配置, 对于某些annotation会reload nginx配置失败, 然后controller就卡死了, 不断重启, 除非删除对应的ingress.
[root@k8s-master ingress]# kubectl get pods -n ingress-nginx
NAME READY STATUS RESTARTS AGE
ingress-nginx-admission-create-k9z4h 0/1 Completed 0 29s
ingress-nginx-admission-patch-gc9p5 0/1 Completed 0 29s
ingress-nginx-controller-776889d8cb-wd84p 1/1 Running 0 29s
[root@k8s-master ingress]#
[root@k8s-master ingress]# kubectl logs -f ingress-nginx-admission-create-k9z4h -n ingress-nginx
W0202 10:49:07.866187 1 client_config.go:615] Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work.
{"err":"secrets \"ingress-nginx-admission\" not found","level":"info","msg":"no secret found","source":"k8s/k8s.go:229","time":"2022-02-02T10:49:07Z"}
{"level":"info","msg":"creating new secret","source":"cmd/create.go:28","time":"2022-02-02T10:49:07Z"}
[root@k8s-master ingress]#
二、用helm工具部署ingress-nginx-controller,并禁止webhook组件
(snap是一个比yum更智能的系统包版本管理工具,可以指定版本升级、回退版本)
root@k8s-master:~/work/ingress/1.1.1# apt install snapd -y
root@k8s-master:~/work/ingress/1.1.1# snap install helm --classic
helm 3.7.0 from Snapcrafters installed
root@k8s-master:~/work/ingress/1.1.1#
root@k8s-master:~/work/ingress/1.1.1#
root@k8s-master:~/work/ingress/1.1.1# helm upgrade --install ingress-nginx ingress-nginx --repo https://kubernetes.github.io/ingress-nginx --namespace ingress-nginx --create-namespace --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false
Release "ingress-nginx" has been upgraded. Happy Helming!
NAME: ingress-nginx
LAST DEPLOYED: Sat Feb 5 07:19:03 2022
NAMESPACE: ingress-nginx
STATUS: deployed
REVISION: 2
TEST SUITE: None
NOTES:
The ingress-nginx controller has been installed.
Get the application URL by running these commands:
export HTTP_NODE_PORT=$(kubectl --namespace ingress-nginx get services -o jsonpath="{.spec.ports[0].nodePort}" ingress-nginx-controller)
export HTTPS_NODE_PORT=$(kubectl --namespace ingress-nginx get services -o jsonpath="{.spec.ports[1].nodePort}" ingress-nginx-controller)
export NODE_IP=$(kubectl --namespace ingress-nginx get nodes -o jsonpath="{.items[0].status.addresses[1].address}")
echo "Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP."
echo "Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS."
An example Ingress that makes use of the controller:
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: example
namespace: foo
spec:
ingressClassName: nginx
rules:
- host: www.example.com
http:
paths:
- backend:
service:
name: exampleService
port:
number: 80
path: /
# This section is only required if TLS is to be enabled for the Ingress
tls:
- hosts:
- www.example.com
secretName: example-tls
If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided:
apiVersion: v1
kind: Secret
metadata:
name: example-tls
namespace: foo
data:
tls.crt: <base64 encoded cert>
tls.key: <base64 encoded key>
type: kubernetes.io/tls
root@k8s-master:~/work/ingress/1.1.1#
[root@k8s-master ingress]# helm install ingress-nginx -n ingress-nginx
[root@k8s-master ingress]# helm uninstall ingress-nginx -n ingress-nginx
1 查看 ingress-nginx-controller状态:
root@k8s-master:~/work/ingress/1.1.1# kubectl get pods -A
NAMESPACE NAME READY STATUS RESTARTS AGE
ingress-nginx ingress-nginx-controller-6bb7fdbf54-589qd 0/1 ImagePullBackOff 0 9m20s
kube-system calico-kube-controllers-958545d87-99fgc 1/1 Running 8 (81m ago) 41h
kube-system calico-node-gcrcz 1/1 Running 6 (81m ago) 41h
kube-system calico-node-gwsrh 1/1 Running 8 (81m ago) 41h
kube-system calico-node-zbkj2 1/1 Running 6 (81m ago) 41h
kube-system coredns-7f6cbbb7b8-g89bp 1/1 Running 8 (81m ago) 41h
kube-system coredns-7f6cbbb7b8-tt8ts 1/1 Running 8 (81m ago) 41h
kube-system etcd-k8s-master 1/1 Running 16 (81m ago) 41h
kube-system kube-apiserver-k8s-master 1/1 Running 16 (81m ago) 41h
kube-system kube-controller-manager-k8s-master 1/1 Running 17 (81m ago) 41h
kube-system kube-proxy-47xmf 1/1 Running 10 (81m ago) 41h
kube-system kube-proxy-4r95c 1/1 Running 9 (81m ago) 41h
kube-system kube-proxy-j4jt4 1/1 Running 8 (81m ago) 41h
kube-system kube-scheduler-k8s-master 1/1 Running 9 (81m ago) 41h
root@k8s-master:~/work/ingress/1.1.1#
2 发现是image拉取失败:
root@k8s-master:~/work/ingress/1.1.1# kubectl describe pods ingress-nginx-controller-6bb7fdbf54-589qd -n ingress-nginx
Name: ingress-nginx-controller-6bb7fdbf54-589qd
Namespace: ingress-nginx
Priority: 0
Node: k8s-node2/192.168.1.103
Start Time: Sat, 05 Feb 2022 07:21:24 +0000
Labels: app.kubernetes.io/component=controller
app.kubernetes.io/instance=ingress-nginx
app.kubernetes.io/name=ingress-nginx
pod-template-hash=6bb7fdbf54
Annotations: cni.projectcalico.org/containerID: 94cf0ed05737eb0721172af2a2d5063e4e74b51b85e51f560a487741f2f7cb30
cni.projectcalico.org/podIP: 10.122.169.189/32
cni.projectcalico.org/podIPs: 10.122.169.189/32
Status: Pending
IP: 10.122.169.189
IPs:
IP: 10.122.169.189
Controlled By: ReplicaSet/ingress-nginx-controller-6bb7fdbf54
Containers:
controller:
Container ID:
Image: k8s.gcr.io/ingress-nginx/controller:v1.1.1@sha256:0bc88eb15f9e7f84e8e56c14fa5735aaa488b840983f87bd79b1054190e660de
Image ID:
Ports: 80/TCP, 443/TCP
Host Ports: 0/TCP, 0/TCP
Args:
/nginx-ingress-controller
--publish-service=$(POD_NAMESPACE)/ingress-nginx-controller
--election-id=ingress-controller-leader
--controller-class=k8s.io/ingress-nginx
--configmap=$(POD_NAMESPACE)/ingress-nginx-controller
State: Waiting
Reason: ImagePullBackOff
Ready: False
Restart Count: 0
Requests:
cpu: 100m
memory: 90Mi
Liveness: http-get http://:10254/healthz delay=10s timeout=1s period=10s #success=1 #failure=5
Readiness: http-get http://:10254/healthz delay=10s timeout=1s period=10s #success=1 #failure=3
Environment:
POD_NAME: ingress-nginx-controller-6bb7fdbf54-589qd (v1:metadata.name)
POD_NAMESPACE: ingress-nginx (v1:metadata.namespace)
LD_PRELOAD: /usr/local/lib/libmimalloc.so
Mounts:
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-m62fd (ro)
Conditions:
Type Status
Initialized True
Ready False
ContainersReady False
PodScheduled True
Volumes:
kube-api-access-m62fd:
Type: Projected (a volume that contains injected data from multiple sources)
TokenExpirationSeconds: 3607
ConfigMapName: kube-root-ca.crt
ConfigMapOptional: <nil>
DownwardAPI: true
QoS Class: Burstable
Node-Selectors: kubernetes.io/os=linux
Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 6m37s default-scheduler Successfully assigned ingress-nginx/ingress-nginx-controller-6bb7fdbf54-589qd to k8s-node2
Warning Failed 5m52s (x2 over 6m21s) kubelet Failed to pull image "k8s.gcr.io/ingress-nginx/controller:v1.1.1@sha256:0bc88eb15f9e7f84e8e56c14fa5735aaa488b840983f87bd79b1054190e660de": rpc error: code = Unknown desc =Error response from daemon: Get "https://k8s.gcr.io/v2/": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
Warning Failed 5m11s kubelet Failed to pull image "k8s.gcr.io/ingress-nginx/controller:v1.1.1@sha256:0bc88eb15f9e7f84e8e56c14fa5735aaa488b840983f87bd79b1054190e660de": rpc error: code = Unknown desc =Error response from daemon: Get "https://k8s.gcr.io/v2/": dial tcp 142.250.157.82:443: i/o timeout
Normal Pulling 4m31s (x4 over 6m36s) kubelet Pulling image "k8s.gcr.io/ingress-nginx/controller:v1.1.1@sha256:0bc88eb15f9e7f84e8e56c14fa5735aaa488b840983f87bd79b1054190e660de"
Warning Failed 4m15s (x4 over 6m21s) kubelet Error: ErrImagePull
Warning Failed 4m15s kubelet Failed to pull image "k8s.gcr.io/ingress-nginx/controller:v1.1.1@sha256:0bc88eb15f9e7f84e8e56c14fa5735aaa488b840983f87bd79b1054190e660de": rpc error: code = Unknown desc =Error response from daemon: Get "https://k8s.gcr.io/v2/": context deadline exceeded
Warning Failed 3m48s (x6 over 6m21s) kubelet Error: ImagePullBackOff
Normal BackOff 87s (x15 over 6m21s) kubelet Back-off pulling image "k8s.gcr.io/ingress-nginx/controller:v1.1.1@sha256:0bc88eb15f9e7f84e8e56c14fa5735aaa488b840983f87bd79b1054190e660de"
root@k8s-master:~/work/ingress/1.1.1#
3 下载国内镜像:
# Pull the ingress-nginx images from a mirror reachable inside China.
docker pull liangjw/kube-webhook-certgen:v1.1.1
docker pull liangjw/ingress-nginx-controller:v1.1.1
# Re-tag them to the names the Deployment's image field expects.
docker tag liangjw/kube-webhook-certgen:v1.1.1 k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.1.1
docker tag liangjw/ingress-nginx-controller:v1.1.1 k8s.gcr.io/ingress-nginx/controller:v1.1.1
# Remove the mirror tags. NOTE: "docker image delete" is not a valid
# subcommand — the correct one is "docker image rm" (alias: docker rmi).
docker image rm liangjw/kube-webhook-certgen:v1.1.1
docker image rm liangjw/ingress-nginx-controller:v1.1.1
修改完成后,controller的镜像内容如下:
root@k8s-master:~/work/ingress/1.1.1/zz# docker images --digests
REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE
k8s.gcr.io/ingress-nginx/controller v1.1.1 <none> 2461b2698dcd 3 weeks ago 285MB
root@k8s-master:~/work/ingress/1.1.1/zz#
4 修改:
root@k8s-master:~/work/ingress/1.1.1/zz# kubectl edit deploy ingress-nginx-controller -n ingress-nginx
deployment.apps/ingress-nginx-controller edited
root@k8s-master:~/work/ingress/1.1.1#
修改image字段:
# 修改yaml文件:
image: k8s.gcr.io/ingress-nginx/controller:v1.1.1@sha256:0bc88eb15f9e7f84e8e56c14fa5735aaa488b840983f87bd79b1054190e660de
修改为:
image: k8s.gcr.io/ingress-nginx/controller:v1.1.1
修改镜像后,ingress-nginx-controller终于跑了起来:
导出为yaml文件:
root@k8s-master:~/work/ingress/1.1.1/zz# kubectl get deploy -n ingress-nginx -o yaml > deploy.yaml
root@k8s-master:~/work/ingress/1.1.1/zz# ll
total 16
drwxr-xr-x 2 root root 4096 Feb 5 07:44 ./
drwxr-xr-x 3 root root 4096 Feb 5 07:31 ../
-rw-r--r-- 1 root root 4491 Feb 5 07:44 deploy.yaml
root@k8s-master:~/work/ingress/1.1.1/zz#
三、测试service:
通过192.168.1.102:32670访问ingress-nginx-controller服务:
(由于此时,尚未部署ingress实例(ingress-nginx-controller的路由目的),因此返回404未找到服务)
通过192.168.1.103:32670访问ingress-nginx-controller服务:
通过https协议访问服务:
四、部署ingress 实例:
1 创建证书:
root@k8s-master:~/work/ing#
root@k8s-master:~/work/ing# openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=nginxsvc/0=nginxsvc"
Generating a RSA private key
....+++++
...+++++
writing new private key to 'tls.key'
-----
req: Skipping unknown attribute "0"
(注:此警告是因为 -subj 中误将数字 "0" 当作字段名使用;Organization 字段应为大写字母 "O",即 -subj "/CN=nginxsvc/O=nginxsvc"。证书仍会生成,只是缺少 O 字段。)
root@k8s-master:~/work/ing#
root@k8s-master:~/work/ing# kubectl create secret tls tls-secret --cert=./tls.crt --key=./tls.key
secret/tls-secret created
root@k8s-master:~/work/ing#
root@k8s-master:~/work/ing# kubectl get secret
NAME TYPE DATA AGE
default-token-kvxvz kubernetes.io/service-account-token 3 2d18h
tls-secret kubernetes.io/tls 2 9s
root@k8s-master:~/work/ing# kubectl get secret -o wide
NAME TYPE DATA AGE
default-token-kvxvz kubernetes.io/service-account-token 3 2d18h
tls-secret kubernetes.io/tls 2 17s
root@k8s-master:~/work/ing#
2创建ingress、service、deploy:
root@k8s-master:~/work/ingress-controller/1.1.1/test# kubectl apply -f delopy1.yaml
deployment.apps/deployment1 created
service/svc-1 created
root@k8s-master:~/work/ingress-controller/1.1.1/test# kubectl apply -f delopy2.yaml
deployment.apps/deployment2 created
service/svc-2 created
root@k8s-master:~/work/ingress-controller/1.1.1/test# kubectl apply -f delopy3.yaml
deployment.apps/deployment3 created
service/svc-3 created
root@k8s-master:~/work/ingress-controller/1.1.1/test#
root@k8s-master:~/work/ingress-controller/1.1.1/test#
root@k8s-master:~/work/ingress-controller/1.1.1/test# kubectl apply -f ingress-rule.yaml
ingress.networking.k8s.io/ingress1 created
ingress.networking.k8s.io/ingress2 created
ingress.networking.k8s.io/ingress3 created
root@k8s-master:~/work/ingress-controller/1.1.1/test#
root@k8s-master:~/work/ingress-controller/1.1.1/test# kubectl get pods
NAME READY STATUS RESTARTS AGE
deployment1-d56d5758d-gzxhs 1/1 Running 0 6m26s
deployment1-d56d5758d-j8bb9 1/1 Running 0 6m26s
deployment1-d56d5758d-vb2hl 1/1 Running 0 6m26s
deployment2-76d4cc985f-9qpv5 1/1 Running 0 6m19s
deployment2-76d4cc985f-l2php 1/1 Running 0 6m19s
deployment2-76d4cc985f-wlnzj 1/1 Running 0 6m19s
deployment3-5b7f4dc-fhtv2 1/1 Running 0 6m
deployment3-5b7f4dc-nj2nk 1/1 Running 0 6m
deployment3-5b7f4dc-xmjdl 1/1 Running 0 6m
root@k8s-master:~/work/ingress-controller/1.1.1/test# kubectl get svc -n ingress-nginx
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
ingress-nginx-controller NodePort 10.10.123.149 <none> 80:30359/TCP,443:30079/TCP 16m
ingress-nginx-controller-admission ClusterIP 10.10.14.150 <none> 443/TCP 16m
root@k8s-master:~/work/ingress-controller/1.1.1/test#
root@k8s-master:~/work/ingress-controller/1.1.1/test# kubectl get pods -n ingress-nginx -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
ingress-nginx-admission-create--1-hfhs4 0/1 Completed 0 16m 10.122.36.78 k8s-node1 <none> <none>
ingress-nginx-admission-patch--1-7vfgr 0/1 Completed 1 16m 10.122.169.136 k8s-node2 <none> <none>
ingress-nginx-controller-79b4dcf989-pbnf8 1/1 Running 0 16m 192.168.1.103 k8s-node2 <none> <none>
root@k8s-master:~/work/ingress-controller/1.1.1/test#
root@k8s-master:~/work/ingress-controller/1.1.1/test#
root@k8s-master:~/work/ingress-controller/1.1.1/test# curl www1.tsc.com:30359
Hello MyApp | Version: v1 | <a href="hostname.html">Pod Name</a>
root@k8s-master:~/work/ingress-controller/1.1.1/test# curl www2.tsc.com:30359
Hello MyApp | Version: v2 | <a href="hostname.html">Pod Name</a>
root@k8s-master:~/work/ingress-controller/1.1.1/test# curl www3.tsc.com:30359
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>
<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>
<p><em>Thank you for using nginx.</em></p>
</body>
</html>
root@k8s-master:~/work/ingress-controller/1.1.1/test#
delopy1.yaml 内容如下:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deployment1
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx-1
  template:
    metadata:
      labels:
        app: nginx-1
    spec:
      containers:
      - name: nginx-1
        image: wangyanglinux/myapp:v1   # v1 image
        imagePullPolicy: IfNotPresent   # Always/IfNotPresent/Never
        ports:
        - containerPort: 80             # pod port: 80
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: svc-1
  name: svc-1
spec:
  ports:
  - name: http
    port: 80          # service port
    protocol: TCP
    targetPort: 80    # pod port
  - name: https
    port: 443
    protocol: TCP
    targetPort: 443
  selector:
    app: nginx-1
  type: NodePort      # or ClusterIP
delopy2.yaml 内容如下:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deployment2
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx-2
  template:
    metadata:
      labels:
        app: nginx-2
    spec:
      containers:
      - name: nginx-2
        image: wangyanglinux/myapp:v2   # v2 image
        imagePullPolicy: IfNotPresent   # Always/IfNotPresent/Never
        ports:
        - containerPort: 80             # pod port: 80
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: svc-2
  name: svc-2
  namespace: default
spec:
  ports:
  - name: http
    port: 80          # service port
    protocol: TCP
    targetPort: 80    # pod port
  - name: https
    port: 443
    protocol: TCP
    targetPort: 443
  selector:
    app: nginx-2
  type: NodePort      # or ClusterIP
delopy3.yaml:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deployment3
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx-3
  template:
    metadata:
      labels:
        app: nginx-3
    spec:
      containers:
      - name: nginx-3
        image: nginx                    # stock nginx image (serves the default welcome page)
        imagePullPolicy: IfNotPresent   # Always/IfNotPresent/Never
        ports:
        - containerPort: 80             # pod port: 80
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: svc-3
  name: svc-3
spec:
  ports:
  - name: http
    port: 80          # service port
    protocol: TCP
    targetPort: 80    # pod port
  - name: https
    port: 443
    protocol: TCP
    targetPort: 443
  selector:
    app: nginx-3
  type: NodePort      # or ClusterIP
ingress-rule.yaml 内容如下:
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ingress1
  annotations:
    # Deprecated since k8s 1.18; spec.ingressClassName is the modern form,
    # but ingress-nginx v1.1.1 still honors this annotation.
    kubernetes.io/ingress.class: "nginx"
spec:
  rules:
  - host: www1.tsc.com
    http:
      paths:
      - pathType: Prefix
        path: /
        backend:
          service:
            name: svc-1
            port:
              number: 80
              # name: http   # a service port may be referenced by name instead of number
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ingress2
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  rules:
  - host: www2.tsc.com
    http:
      paths:
      - pathType: Prefix
        path: /
        backend:
          service:
            name: svc-2
            port:
              number: 80
              # name: http
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ingress3
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  rules:
  - host: www3.tsc.com
    http:
      paths:
      - pathType: Prefix
        path: /
        backend:
          service:
            name: svc-3
            port:
              number: 80
              # name: http
3 访问关系:
4 分别通过service、ingress访问nginx pod:
附:
ubuntu默认安装了snapd工具。
centos 需要手动去安装snapd:
yum install epel-release -y
yum install snapd -y
systemctl enable --now snapd.socket
ln -s /var/lib/snapd/snap /snap
Snap会每天自动更新通过其安装的软件
snap install XXX ##安装软件
snap list XXX ##列出本机已安装的软件
snap refresh XXX ##升级软件
snap remove XXX ##删除软件
snap run xxx.xxx ##运行某个bin文件
snap alias XXX YYY ##把XXXalias为YYY
snap安装的软件的bin文件,位于/snap/bin下面
更多推荐
所有评论(0)