Exposing services to the external network by domain name through a K8S Ingress
1. Environment preparation:
The K8S cluster already has the DNS add-on, the private registry, and so on deployed (a quick sanity check is sketched below).
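A minimal sanity check, assuming the DNS add-on runs in the kube-system namespace and that re.bcdgptv.com.cn is a standard Docker Registry v2 endpoint (adjust the scheme if the registry is served over plain HTTP):
# confirm the nodes and the DNS add-on pods are healthy
kubectl get node
kubectl get pods -n kube-system -o wide
# confirm the private registry answers the v2 API
curl -k https://re.bcdgptv.com.cn/v2/_catalog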
2. Prepare the configuration files below, paying particular attention to the highlighted parts: the image addresses, which point at the private registry, and the nodeSelector.
[root@k8s1 deploy]# cat namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: ingress-nginx
[root@k8s1 deploy]# cat default-backend.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: default-http-backend
  labels:
    app: default-http-backend
  namespace: ingress-nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: default-http-backend
  template:
    metadata:
      labels:
        app: default-http-backend
    spec:
      terminationGracePeriodSeconds: 60
      containers:
      - name: default-http-backend
        # Any image is permissible as long as:
        # 1. It serves a 404 page at /
        # 2. It serves 200 on a /healthz endpoint
        image: re.bcdgptv.com.cn/defaultbackend:v1.4
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 30
          timeoutSeconds: 5
        ports:
        - containerPort: 8080
        resources:
          limits:
            cpu: 10m
            memory: 20Mi
          requests:
            cpu: 10m
            memory: 20Mi
---
apiVersion: v1
kind: Service
metadata:
  name: default-http-backend
  namespace: ingress-nginx
  labels:
    app: default-http-backend
spec:
  ports:
  - port: 80
    targetPort: 8080
  selector:
    app: default-http-backend
[root@k8s1 deploy]# cat configmap.yaml
kind: ConfigMap
apiVersion: v1
metadata:
  name: nginx-configuration
  namespace: ingress-nginx
  labels:
    app: ingress-nginx
[root@k8s1 deploy]# cat tcp-services-configmap.yaml
kind: ConfigMap
apiVersion: v1
metadata:
  name: tcp-services
  namespace: ingress-nginx
[root@k8s1 deploy]# cat udp-services-configmap.yaml
kind: ConfigMap
apiVersion: v1
metadata:
  name: udp-services
  namespace: ingress-nginx
[root@k8s1 deploy]# cat rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nginx-ingress-serviceaccount
  namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: nginx-ingress-clusterrole
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - endpoints
      - nodes
      - pods
      - secrets
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - services
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - "extensions"
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - create
      - patch
  - apiGroups:
      - "extensions"
    resources:
      - ingresses/status
    verbs:
      - update
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
  name: nginx-ingress-role
  namespace: ingress-nginx
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - pods
      - secrets
      - namespaces
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - configmaps
    resourceNames:
      # Defaults to "<election-id>-<ingress-class>"
      # Here: "<ingress-controller-leader>-<nginx>"
      # This has to be adapted if you change either parameter
      # when launching the nginx-ingress-controller.
      - "ingress-controller-leader-nginx"
    verbs:
      - get
      - update
  - apiGroups:
      - ""
    resources:
      - configmaps
    verbs:
      - create
  - apiGroups:
      - ""
    resources:
      - endpoints
    verbs:
      - get
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: nginx-ingress-role-nisa-binding
  namespace: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: nginx-ingress-role
subjects:
  - kind: ServiceAccount
    name: nginx-ingress-serviceaccount
    namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: nginx-ingress-clusterrole-nisa-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: nginx-ingress-clusterrole
subjects:
  - kind: ServiceAccount
    name: nginx-ingress-serviceaccount
    namespace: ingress-nginx
[root@k8s1 deploy]# cat with-rbac.yaml
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: nginx-ingress-controller
  namespace: ingress-nginx
spec:
  selector:
    matchLabels:
      app: ingress-nginx
  template:
    metadata:
      labels:
        app: ingress-nginx
      annotations:
        prometheus.io/port: '10254'
        prometheus.io/scrape: 'true'
    spec:
      serviceAccountName: nginx-ingress-serviceaccount
      hostNetwork: true
      containers:
        - name: nginx-ingress-controller
          image: re.bcdgptv.com.cn/nginx-ingress-controller:v0.11.0
          args:
            - /nginx-ingress-controller
            - --default-backend-service=$(POD_NAMESPACE)/default-http-backend
            - --configmap=$(POD_NAMESPACE)/nginx-configuration
            - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
            - --udp-services-configmap=$(POD_NAMESPACE)/udp-services
            - --annotations-prefix=nginx.ingress.kubernetes.io
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          ports:
            - name: http
              containerPort: 80
            - name: https
              containerPort: 443
          livenessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
      nodeSelector:
        install/ingress: "true"
For reasons that cannot be described here, the images hosted on Google's registry cannot be downloaded directly, so pull them from other sources in advance and push them to the private registry; this keeps things simple.
[root@k8s2 ~]# docker pull quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.11.0
0.11.0: Pulling from kubernetes-ingress-controller/nginx-ingress-controller
d0c199d1a37e: Already exists
781dcf2640af: Already exists
25a6482c60cd: Already exists
98b4751d48d3: Already exists
2838cb0735f3: Already exists
3b1e1259882f: Already exists
dfbcff53df24: Already exists
af05f293abc3: Already exists
01034fed7596: Already exists
Digest: sha256:885b65cec9e58c4829be447af4b0b00ecc40c09e0b9e9f662374f308e536c217
Status: Image is up to date for quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.11.0
[root@k8s2 ~]# docker tag 7f6aa8354f0c re.bcdgptv.com.cn/nginx-ingress-controller:v0.11.0
[root@k8s2 ~]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
quay.io/kubernetes-ingress-controller/nginx-ingress-controller 0.11.0 7f6aa8354f0c 15 months ago 211MB
re.bcdgptv.com.cn/nginx-ingress-controller v0.11.0 7f6aa8354f0c 15 months ago 211MB
registry.access.redhat.com/rhel7/pod-infrastructure latest 99965fb98423 19 months ago 209MB
[root@k8s2 ~]# docker push re.bcdgptv.com.cn/nginx-ingress-controller:v0.11.0
The push refers to repository [re.bcdgptv.com.cn/nginx-ingress-controller]
bdb94609f078: Pushed
114816d132ff: Pushed
a54d3df64e88: Pushed
12f55245fb2a: Pushed
81ad5b2b5773: Pushed
b6a7ffb10496: Pushed
8b144e141cc2: Pushed
8a126625a40f: Pushed
684c19bf2c27: Pushed
v0.11.0: digest: sha256:6b698df1ccad9cd4d6d62dc00c4cb24512503d0dee56f8f79ff869c32feecd5f size: 2201
[root@k8s3 ~]# docker pull mirrorgooglecontainers/defaultbackend:1.4
1.4: Pulling from mirrorgooglecontainers/defaultbackend
5f68dfd9f8d7: Pull complete
Digest: sha256:05cb942c5ff93ebb6c63d48737cd39d4fa1c6fa9dc7a4d53b2709f5b3c8333e8
Status: Downloaded newer image for mirrorgooglecontainers/defaultbackend:1.4
[root@k8s3 ~]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
re.bcdgptv.com.cn/war v1 1ced8bd413b7 13 days ago 470MB
coredns/coredns 1.2.6 f59dcacceff4 6 months ago 40MB
mirrorgooglecontainers/defaultbackend 1.4 846921f0fe0e 19 months ago 4.84MB
registry.access.redhat.com/rhel7/pod-infrastructure latest 99965fb98423 19 months ago 209MB
[root@k8s3 ~]# docker tag 846921f0fe0e re.bcdgptv.com.cn/defaultbackend:v1.4
[root@k8s3 ~]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
re.bcdgptv.com.cn/war v1 1ced8bd413b7 13 days ago 470MB
coredns/coredns 1.2.6 f59dcacceff4 6 months ago 40MB
mirrorgooglecontainers/defaultbackend 1.4 846921f0fe0e 19 months ago 4.84MB
re.bcdgptv.com.cn/defaultbackend v1.4 846921f0fe0e 19 months ago 4.84MB
registry.access.redhat.com/rhel7/pod-infrastructure latest 99965fb98423 19 months ago 209MB
[root@k8s3 ~]# docker push re.bcdgptv.com.cn/defaultbackend:v1.4
The push refers to repository [re.bcdgptv.com.cn/defaultbackend]
d62604d5d244: Pushed
v1.4: digest: sha256:865b0c35e6da393b8e80b7e3799f777572399a4cff047eb02a81fa6e7a48ed4b size: 528
Likewise, prepare the Apache and Nginx images that will be used and push them to the private registry, as sketched below.
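A sketch of that preparation, assuming Docker Hub's httpd:2.4 and nginx:1.7.9 images are acceptable upstream sources for the re.bcdgptv.com.cn/http:v2.4 and re.bcdgptv.com.cn/nginx:v1.79 tags referenced later:
docker pull httpd:2.4
docker tag httpd:2.4 re.bcdgptv.com.cn/http:v2.4
docker push re.bcdgptv.com.cn/http:v2.4
docker pull nginx:1.7.9
docker tag nginx:1.7.9 re.bcdgptv.com.cn/nginx:v1.79
docker push re.bcdgptv.com.cn/nginx:v1.79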
Load the YAML files above:
kubectl create -f namespace.yaml
kubectl create -f default-backend.yaml
kubectl create -f configmap.yaml
kubectl create -f tcp-services-configmap.yaml
kubectl create -f udp-services-configmap.yaml
kubectl create -f rbac.yaml
kubectl create -f with-rbac.yaml
[root@k8s1 deploy]# kubectl get all -n ingress-nginx
NAME READY STATUS RESTARTS AGE
pod/default-http-backend-dddf85766-zhmqk 1/1 Running 1 4h
pod/nginx-ingress-controller-tk8q4 1/1 Running 1 2h
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/default-http-backend ClusterIP 10.254.173.22 <none> 80/TCP 4h
NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE
daemonset.apps/nginx-ingress-controller 1 1 1 1 1 install/ingress=true 2h
NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
deployment.apps/default-http-backend 1 1 1 1 4h
NAME DESIRED CURRENT READY AGE
replicaset.apps/default-http-backend-dddf85766 1 1 1 4h
Because of the nodeSelector in the configuration, pod/nginx-ingress-controller-tk8q4 is scheduled onto the k8s2 node, which was labelled as follows (a placement check is sketched after the node listing):
kubectl label nodes k8s2 install/ingress=true
[root@k8s3 ~]# kubectl get node --show-labels
NAME STATUS ROLES AGE VERSION LABELS
k8s1 Ready <none> 50d v1.10.4 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=k8s1
k8s2 Ready <none> 50d v1.10.4 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,install/ingress=true,kubernetes.io/hostname=k8s2
k8s3 Ready <none> 50d v1.10.4 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=k8s3
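To double-check the placement, the wide pod listing shows which node each pod landed on (output omitted here):
kubectl get pods -n ingress-nginx -o wide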
3. Deploy the applications
[root@k8s1 pv]# cat apache.yml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: apache1
spec:
  replicas: 2
  template:
    metadata:
      labels:
        run: apache1
    spec:
      containers:
      - name: apache1
        image: re.bcdgptv.com.cn/http:v2.4
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: apache1
  labels:
    run: apache1
spec:
  type: NodePort
  ports:
  - port: 80
    targetPort: 80
    nodePort: 8888
  selector:
    run: apache1
[root@k8s1 pv]# cat nginx.yml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx1
spec:
  replicas: 2
  template:
    metadata:
      labels:
        run: nginx1
    spec:
      containers:
      - name: nginx1
        image: re.bcdgptv.com.cn/nginx:v1.79
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx1
  labels:
    run: nginx1
spec:
  type: NodePort
  ports:
  - port: 80
    targetPort: 80
    nodePort: 8889
  selector:
    run: nginx1
[root@k8s1 pv]# kubectl create -f nginx.yml
deployment.extensions "nginx1" created
service "nginx1" created
[root@k8s1 pv]# kubectl create -f apache.yml
deployment.extensions "apache1" created
service "apache1" created
[root@k8s1 pv]# kubectl get all
NAME READY STATUS RESTARTS AGE
pod/apache1-7f4b94bc74-hvxqz 1/1 Running 0 3m
pod/apache1-7f4b94bc74-s6klv 1/1 Running 0 3m
pod/nginx1-76986d74bc-22skb 1/1 Running 0 16m
pod/nginx1-76986d74bc-9r2lr 1/1 Running 0 16m
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/apache1 NodePort 10.254.122.230 <none> 80:8888/TCP 3m
service/kubernetes ClusterIP 10.254.0.1 <none> 443/TCP 50d
service/nginx1 NodePort 10.254.52.107 <none> 80:8889/TCP 16m
NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
deployment.apps/apache1 2 2 2 2 3m
deployment.apps/nginx1 2 2 2 2 16m
NAME DESIRED CURRENT READY AGE
replicaset.apps/apache1-7f4b94bc74 2 2 2 3m
replicaset.apps/nginx1-76986d74bc 2 2 2 16m
Route requests to the corresponding Service according to the host name:
[root@k8s1 pv]# cat ingress.yml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: ingress.test
  namespace: default
spec:
  rules:
  - host: apache.bcdgptv.com.cn
    http:
      paths:
      - path: /
        backend:
          serviceName: apache1
          servicePort: 80
  - host: nginx.bcdgptv.com.cn
    http:
      paths:
      - path: /
        backend:
          serviceName: nginx1
          servicePort: 80
[root@k8s1 pv]# kubectl create -f ingress.yml
ingress.extensions "ingress.test" created
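A quick check that the rules were picked up (output omitted here):
kubectl get ingress
kubectl describe ingress ingress.test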
Point the test domain names at the IP of the node where pod/nginx-ingress-controller-tk8q4 runs in this setup (i.e. the IP of k8s2) and test, as sketched below; with that, K8S services are exposed to the external network by domain name through Ingress.
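For example, from any test client, where <k8s2-IP> is a placeholder for the real address of k8s2 (an /etc/hosts entry or a DNS record pointing both host names at k8s2 works just as well):
curl --resolve apache.bcdgptv.com.cn:80:<k8s2-IP> http://apache.bcdgptv.com.cn/
curl --resolve nginx.bcdgptv.com.cn:80:<k8s2-IP> http://nginx.bcdgptv.com.cn/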