Introduction

A Kubernetes (K8S) cluster consists of two kinds of nodes: master nodes and worker nodes.
The master node is responsible for controlling the cluster: scheduling pods, managing tokens, and so on.
The worker nodes do the actual work: starting and managing containers.
Master and worker nodes are normally deployed on separate machines.

Preparation

Prepare three CentOS 7 virtual machines. On each one set the IP address and hostname, disable the firewall and SELinux, synchronize the clocks of the three nodes, and add the IP-to-hostname mappings (a sketch of these steps follows the table below).

hostname    IP address
node1       192.168.29.143
node2       192.168.29.142
node3       192.168.29.144

node1 acts as the cluster master; node2 and node3 act as worker nodes.
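
A minimal sketch of these preparation steps, run on every node (assuming firewalld, SELinux and chronyd; adjust the hostname per machine):

hostnamectl set-hostname node1   # node2 / node3 on the other machines
systemctl disable --now firewalld
setenforce 0
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
yum install chrony -y && systemctl enable --now chronyd   # time synchronization
cat >> /etc/hosts <<EOF
192.168.29.143 node1
192.168.29.142 node2
192.168.29.144 node3
EOF
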
Disable swap

Disable it for the current session
# swapoff -a
Comment out the swap entry in /etc/fstab so it stays disabled after a reboot
vi /etc/fstab
#UUID=5586e861-1922-462d-86a9-3236085f6653 swap                    swap    defaults        0 0
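To confirm that swap is now off (the Swap line should read 0):
# free -h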

Set up passwordless SSH login between the nodes

# ssh-keygen   # generate a key pair
# ssh-copy-id root@node1
# ssh-copy-id root@node2
# ssh-copy-id root@node3
# verify the passwordless login
# ssh root@node1
# ssh root@node2
# ssh root@node3

Install Kubernetes and Docker on all nodes

Configure the Kubernetes yum repository (e.g. /etc/yum.repos.d/kubernetes.repo)

[kubernetes]
name=Kubernetes repo
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0

Download and unpack the Docker binary package

tar -zxvf docker-20.10.9.tgz
cp -p docker/* /usr/bin

Create the Docker systemd service

vi /usr/lib/systemd/system/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.com
After=network.target docker.socket
[Service]
Type=notify
EnvironmentFile=-/run/flannel/docker
WorkingDirectory=/usr/local/bin
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:4243 -H unix:///var/run/docker.sock --selinux-enabled=false --log-opt max-size=1g
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting. 
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it. 
# Only systemd 226 and above support this version. 
#TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers 
Delegate=yes
# kill only the docker process, not all processes in the cgroup 
KillMode=process
Restart=on-failure
[Install]
WantedBy=multi-user.target

Install the Kubernetes components

yum makecache
yum install kubelet-1.21.7 kubeadm-1.21.7 kubectl-1.21.7 -y

After installation, enable and start the services (kubelet will keep restarting until the cluster is initialized; this is expected)

systemctl daemon-reload
systemctl enable --now kubelet
systemctl enable --now docker

Modify the configuration files (use the systemd cgroup driver for both Docker and the kubelet)

vi /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"]
}
mkdir /var/lib/kubelet -pv
vi /var/lib/kubelet/config.yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd

Restart the services

systemctl restart kubelet docker

Enable bridge traffic forwarding to iptables

# echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables
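
This setting does not survive a reboot. A persistent variant (a sketch, using the standard sysctl.d mechanism) is:

cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system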

Deploy the cluster

Initialize the cluster on node1

[root@node1 ~]# kubeadm init --apiserver-advertise-address=192.168.29.143 --image-repository  registry.aliyuncs.com/google_containers --kubernetes-version v1.21.7  --service-cidr=10.96.0.0/12 --pod-network-cidr=10.244.0.0/16

After initialization finishes, run the commands below as instructed by the output, and record the token and related information printed on screen; it is needed later when the other nodes join the cluster.

[root@node1 ~]# mkdir -p $HOME/.kube
[root@node1 ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@node1 ~]# chown $(id -u):$(id -g) $HOME/.kube/config

Join node2 and node3 to the cluster.
If you forgot to record the token, or the token has expired (tokens are valid for 24 hours), run the following commands on node1 to generate a new one:

[root@node1 ~]# kubeadm token create #create a token
[root@node1 ~]# kubeadm token list #list existing tokens
[root@node1 ~]# openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //' #compute the CA certificate hash used for --discovery-token-ca-cert-hash
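
A convenient alternative is to let kubeadm print the complete join command, token and hash included:

[root@node1 ~]# kubeadm token create --print-join-command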

Using the token information from the initialization output (or from a newly created token), run the following commands on node2 and node3 to join them to the cluster:

[root@node2 ~]# kubeadm join 192.168.29.143:6443 --token jt0aws.p9ni06om5c6vqc41 --discovery-token-ca-cert-hash sha256:dde3b1db5dec438df389776202476af779d69eb0b38c8a2a77c9afa8301a94b3
[root@node3 ~]# kubeadm join 192.168.29.143:6443 --token jt0aws.p9ni06om5c6vqc41 --discovery-token-ca-cert-hash sha256:dde3b1db5dec438df389776202476af779d69eb0b38c8a2a77c9afa8301a94b3

Check the cluster status

[root@node1 ~]# kubectl get nodes

The cluster is now deployed, but the nodes cannot yet communicate over the pod network, so every node shows the status NotReady.

Deploy the cluster network

Install flannel as the cluster network. Pull the flannel image on every node:

Download kube-flannel.yaml from the flannel GitHub repository
[root@node1 ~]#docker pull quay.io/coreos/flannel:v0.11.0
[root@node2 ~]#docker pull quay.io/coreos/flannel:v0.11.0
[root@node3 ~]#docker pull quay.io/coreos/flannel:v0.11.0
[root@node1 ~]# vi kube-flannel.yaml
image: quay.io/coreos/flannel:v0.11.0
imagePullPolicy: Never

[root@node1 ~]# kubectl apply -f kube-flannel.yaml
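
To watch flannel come up, check that its DaemonSet pods reach the Running state (the v0.11.0 manifest deploys them into kube-system; newer manifests use a kube-flannel namespace):

[root@node1 ~]# kubectl get pods -n kube-system -o wide | grep flannel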

Check the cluster status again; every node should now show Ready.
The Kubernetes cluster deployment is complete.

Deploy the dashboard

Pull the kubernetesui/dashboard image

Get the deployment manifest from the kubernetes/dashboard GitHub repository (saved here as kubernetes-dashboard.yaml)
[root@node1 ~]# docker pull kubernetesui/dashboard:v2.3.0
[root@node2 ~]# docker pull kubernetesui/dashboard:v2.3.0
[root@node3 ~]# docker pull kubernetesui/dashboard:v2.3.0

Pull the kubernetesui/metrics-scraper image

[root@node1 ~]# docker pull kubernetesui/metrics-scraper:v1.0.6
[root@node2 ~]# docker pull kubernetesui/metrics-scraper:v1.0.6
[root@node3 ~]# docker pull kubernetesui/metrics-scraper:v1.0.6

Deploy the service

[root@node1 ~]# vi kubernetes-dashboard.yaml
# Service section: expose the dashboard through a NodePort
spec:
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 31080
  type: NodePort
  selector:
    k8s-app: kubernetes-dashboard
---
[root@node1 ~]#  kubectl apply -f kubernetes-dashboard.yaml
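
To verify the deployment, check the pods (the upstream manifest creates them in the kubernetes-dashboard namespace) and then open the NodePort in a browser:

[root@node1 ~]# kubectl get pods -n kubernetes-dashboard
# browse to https://192.168.29.143:31080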

Get a login token

[root@node1 ~]# kubectl -n kube-system describe $(kubectl -n kube-system get secret -o name | grep namespace) | grep token

Bind the user (note: granting cluster-admin to system:anonymous is insecure and only suitable for a lab environment)

[root@node1 ~]# kubectl create clusterrolebinding system:anonymous   --clusterrole=cluster-admin   --user=system:anonymous

Deploy an Nginx application

Pull the image
Pull the Nginx image on node2 and node3 in advance (the Deployment below uses imagePullPolicy: Never)

[root@node2 ~]# docker pull nginx
[root@node3 ~]# docker pull nginx

Install Nginx on the hosts (this provides the /etc/nginx and /usr/share/nginx/html directories used by the hostPath mounts below)

[root@node2 ~]# yum install nginx -y
[root@node3 ~]# yum install nginx -y

Write the manifest

[root@node1 ~]# vi nginx.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        imagePullPolicy: Never
        ports:
        - name: http
          containerPort: 80
        resources:
          limits:
            cpu: 100m
            memory: 50Mi
          requests:
            cpu: 100m
            memory: 50Mi
        volumeMounts:
        - mountPath: /etc/nginx/
          name: conf
        - mountPath: /usr/share/nginx/html/
          name: html
      volumes:
      - name: conf
        # local host mount
        hostPath:
          path: /etc/nginx/
        # alternative: NFS mount (use either hostPath or nfs for a volume, not both)
        #nfs:
        #  server: 192.168.29.143
        #  path: "/share/nginx/conf/"
      - name: html
        hostPath:
          path: /usr/share/nginx/html/
        #nfs:
        #  server: 192.168.29.143
        #  path: "/share/nginx/html/"
---
apiVersion: v1
kind: Service
metadata:
  name: nginx
spec:
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: nginx
  type: LoadBalancer
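
If the NFS variant of the volumes is used, the share has to exist on the NFS server first. A minimal sketch on node1, assuming the /share/nginx paths from the manifest above (nfs-utils must also be installed on node2 and node3 so they can mount the share):

[root@node1 ~]# yum install nfs-utils -y
[root@node1 ~]# mkdir -p /share/nginx/{conf,html}
[root@node1 ~]# echo "/share/nginx *(rw,sync,no_root_squash)" >> /etc/exports
[root@node1 ~]# systemctl enable --now nfs-server
[root@node1 ~]# exportfs -rv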

Deploy Nginx

[root@node1 ~]# kubectl apply -f nginx.yaml 
# check the pods and where they are scheduled
[root@node1 ~]# kubectl get pods -o wide
# check the service
[root@node1 ~]# kubectl get service
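
A quick end-to-end check is to curl the service address reported above (use the CLUSTER-IP, or the EXTERNAL-IP once a load balancer assigns one):

[root@node1 ~]# curl http://<service-ip>/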

Deploy a private image registry

Docker Registry
Pull the image

[root@node1 ~]# docker pull registry

Modify the Docker configuration (keep the cgroup driver setting added earlier and add the insecure registry)

[root@node1 ~]# vi /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "insecure-registries": ["node1:5000"]
}
[root@node2 ~]# vi /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "insecure-registries": ["node1:5000"]
}
[root@node3 ~]# vi /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "insecure-registries": ["node1:5000"]
}

Restart the service

[root@node1 ~]# systemctl restart docker
[root@node2 ~]# systemctl restart docker
[root@node3 ~]# systemctl restart docker

Run the registry container

[root@node1 ~]# docker run -itd -v /var/lib/registry:/var/lib/registry -p 5000:5000 --restart=always --name registry registry:latest

Tag an image

[root@node1 ~]# docker tag nginx:latest node1:5000/nginx:latest

Push the image

[root@node1 ~]# docker push node1:5000/nginx:latest

List the repositories in the registry

[root@node1 ~]# curl -XGET http://node1:5000/v2/_catalog
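
The tags of an individual repository can be listed through the same v2 API:

[root@node1 ~]# curl -XGET http://node1:5000/v2/nginx/tags/list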

Set up a web UI for the registry

[root@node1 ~]# docker pull hyper/docker-registry-web
[root@node1 ~]# docker run -itd -p 8080:8080 --name registry-web --link registry -e REGISTRY_URL=http://192.168.29.143:5000/v2 -e REGISTRY_NAME=192.168.29.143:5000 hyper/docker-registry-web

Harbor
Install docker-compose

Download the docker-compose binary from the docker/compose GitHub releases
[root@node1 ~]#  mv docker-compose-linux-x86_64 /usr/local/bin/docker-compose
[root@node1 ~]# chmod +x /usr/local/bin/docker-compose
[root@node1 ~]# docker-compose --version

Add host resolution (append the following line to /etc/hosts on all three nodes)

[root@node1 ~]#  vi /etc/hosts
[root@node2 ~]#  vi /etc/hosts
[root@node3 ~]#  vi /etc/hosts
192.168.29.143    harbor.master.com

Configure Harbor

Download the offline installer from the goharbor/harbor GitHub releases
[root@node1 ~]# tar -zxvf harbor-offline-installer-v2.3.4.tgz -C /usr/local/
[root@node1 ~]# cp /usr/local/harbor/harbor.yml.tmpl /usr/local/harbor/harbor.yml
[root@node1 ~]# vi /usr/local/harbor/harbor.yml
hostname: harbor.master.com
https:
  port: 443
  certificate: /usr/local/harbor/cert/harbor.crt
  private_key: /usr/local/harbor/cert/harbor.key
harbor_admin_password: Harbor12345
data_volume: /usr/local/harbor/data
database:
  password: root123
[root@node1 ~]#  mkdir /usr/local/harbor/{cert,data}

Generate a self-signed certificate

[root@node1 ~]# openssl req -newkey rsa:4096 -nodes -sha256 -keyout /usr/local/harbor/cert/harbor.key -x509 -out /usr/local/harbor/cert/harbor.crt -days 3650
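
The command above asks for the certificate subject interactively; a non-interactive variant (the CN must match the Harbor hostname) looks like this:

[root@node1 ~]# openssl req -newkey rsa:4096 -nodes -sha256 -subj "/CN=harbor.master.com" -keyout /usr/local/harbor/cert/harbor.key -x509 -out /usr/local/harbor/cert/harbor.crt -days 3650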

Install Harbor

[root@node1 ~]# cd /usr/local/harbor
[root@node1 harbor]# ./install.sh

Configure Docker to trust the Harbor registry

[root@node1 ~]# vi /etc/docker/daemon.json
[root@node2 ~]# vi /etc/docker/daemon.json
[root@node3 ~]# vi /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "insecure-registries": ["node1:5000", "https://harbor.master.com"]
}
[root@node1 ~]# systemctl daemon-reload;systemctl restart docker
[root@node2 ~]# systemctl daemon-reload;systemctl restart docker
[root@node3 ~]# systemctl daemon-reload;systemctl restart docker

Configure a systemd service for Harbor

[root@node1 ~]#  vi /usr/lib/systemd/system/harbor.service
[Unit]
Description=harbor
After=docker.service systemd-networkd.service systemd-resolved.service
Requires=docker.service
Documentation=http://github.com/vmware/harbor

[Service]
Type=simple
Restart=on-failure
RestartSec=5
ExecStart=/usr/local/bin/docker-compose -f  /usr/local/harbor/docker-compose.yml up
ExecStop=/usr/local/bin/docker-compose -f  /usr/local/harbor/docker-compose.yml down

[Install]
WantedBy=multi-user.target
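
After writing the unit file, reload systemd and enable the service so Harbor starts on boot:

[root@node1 ~]# systemctl daemon-reload
[root@node1 ~]# systemctl enable --now harbor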

Test

#command line
[root@node1 ~]# docker login https://harbor.master.com
#web UI
https://192.168.29.143
#push
[root@node1 ~]# docker tag nginx:latest  harbor.master.com/library/mynginx:v1
[root@node1 ~]# docker push  harbor.master.com/library/mynginx:v1
#pull
[root@node2 ~]# docker pull harbor.master.com/library/mynginx:v1

Deploy autoscaling (HPA)

Pull the image

Get the deployment manifests from the kubernetes-incubator/metrics-server GitHub repository
[root@node1 ~]# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/metrics-server-amd64:v0.3.6
[root@node2 ~]# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/metrics-server-amd64:v0.3.6
[root@node3 ~]# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/metrics-server-amd64:v0.3.6

Modify the configuration file

[root@node1 ~]#  vi /metrics-server/metrics-server-deployment.yaml
        image: registry.cn-hangzhou.aliyuncs.com/google_containers/metrics-server-amd64:v0.3.6
        imagePullPolicy: Never
        command:
        - /metrics-server
        - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP
        - --kubelet-insecure-tls

Deploy the service

[root@node1 metrics-server]# kubectl apply -f ./

Create the autoscaler

[root@node1 ~]# kubectl autoscale deployment nginx --min=2 --max=8 --cpu-percent=30 #use the name of the Deployment created earlier

[root@node1 ~]# kubectl top nodes
[root@node1 ~]# kubectl top pods
[root@node1 ~]# kubectl get hpa

Test

[root@node1 ~]# yum install httpd-tools -y #provides the ab benchmarking tool

[root@node1 ~]# ab -n 50000 -c 30 http://podIP/index.html #podIP: the IP of one of the nginx pods (kubectl get pods -o wide)
[root@node1 ~]# kubectl get hpa
[root@node1 ~]# kubectl get pods

Deploy the MetalLB load balancer

Pull the images

Get the manifests from the metallb GitHub repository
[root@node1 ~]# docker pull quay.io/metallb/speaker:main
[root@node1 ~]# docker pull quay.io/metallb/controller:main
[root@node2 ~]# docker pull quay.io/metallb/speaker:main
[root@node2 ~]# docker pull quay.io/metallb/controller:main
[root@node3 ~]# docker pull quay.io/metallb/speaker:main
[root@node3 ~]# docker pull quay.io/metallb/controller:main

Deploy the service

[root@node1 ]# kubectl create ns metallb-system
[root@node1 ]# kubectl apply -f metallb.yaml

Check that it is running

[root@node1 ]# kubectl get pods -n metallb-system -o wide
[root@node1 ]# kubectl get daemonset -n metallb-system 
[root@node1 ]# kubectl get deployment -n metallb-system

Configure the load-balancer IP address pool

#adjust to match your own network
[root@node1 ]# vi example-layer2-config.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: my-ip-space
      protocol: layer2
      addresses:
      - 192.168.29.200-192.168.29.210

Apply the configuration

[root@node1 ]# kubectl apply -f example-layer2-config.yaml

Create an application and service

[root@node1 ]# kubectl apply -f tutorial-2.yaml

Check the service

[root@node1 ]# kubectl get service
[root@node1 ]# kubectl get pods

Test

[root@node1 ]# curl http://IP/index.html #IP: the EXTERNAL-IP assigned to the service by MetalLB

Deploy Prometheus monitoring

Pull the image

[root@node1 ]# docker pull prom/prometheus
[root@node2 ]# docker pull prom/prometheus
[root@node3 ]# docker pull prom/prometheus

Create a namespace

[root@node1 ]# kubectl create ns monitor-system

Bind the user (RBAC)

[root@node1 ]# kubectl apply -f cluster-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: prometheus
rules:
- apiGroups: [""]
  resources:
  - nodes
  - nodes/proxy
  - services
  - endpoints
  - pods
  verbs: ["get", "list", "watch"]
- apiGroups:
  - extensions
  resources:
  - ingresses
  verbs: ["get", "list", "watch"]
- nonResourceURLs: ["/metrics"]
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: prometheus
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: prometheus
subjects:
- kind: ServiceAccount
  name: default
  namespace: monitor-system

Configure the service

[root@node1 ]# kubectl apply -f config-map.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: prometheus-server-conf
  labels:
    name: prometheus-server-conf
  namespace: monitor-system
data:
  prometheus.yml: |-
    global:
      scrape_interval: 5s
      evaluation_interval: 5s
    scrape_configs:
      - job_name: 'kubernetes-apiservers'
        kubernetes_sd_configs:
        - role: endpoints
        scheme: https
        tls_config:
          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
        relabel_configs:
        - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
          action: keep
          regex: default;kubernetes;https

      - job_name: 'kubernetes-nodes'
        scheme: https
        tls_config:
          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
        kubernetes_sd_configs:
        - role: node
        relabel_configs:
        - action: labelmap
          regex: __meta_kubernetes_node_label_(.+)
        - target_label: __address__
          replacement: kubernetes.default.svc:443
        - source_labels: [__meta_kubernetes_node_name]
          regex: (.+)
          target_label: __metrics_path__
          replacement: /api/v1/nodes/${1}/proxy/metrics

      - job_name: 'kubernetes-pods'
        kubernetes_sd_configs:
        - role: pod
        relabel_configs:
        - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
          action: keep
          regex: true
        - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
          action: replace
          target_label: __metrics_path__
          regex: (.+)
        - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
          action: replace
          regex: ([^:]+)(?::\d+)?;(\d+)
          replacement: $1:$2
          target_label: __address__
        - action: labelmap
          regex: __meta_kubernetes_pod_label_(.+)
        - source_labels: [__meta_kubernetes_namespace]
          action: replace
          target_label: kubernetes_namespace
        - source_labels: [__meta_kubernetes_pod_name]
          action: replace
          target_label: kubernetes_pod_name

      - job_name: 'kubernetes-cadvisor'
        scheme: https
        tls_config:
          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
        kubernetes_sd_configs:
        - role: node
        relabel_configs:
        - action: labelmap
          regex: __meta_kubernetes_node_label_(.+)
        - target_label: __address__
          replacement: kubernetes.default.svc:443
        - source_labels: [__meta_kubernetes_node_name]
          regex: (.+)
          target_label: __metrics_path__
          replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor

      - job_name: 'kubernetes-service-endpoints'
        kubernetes_sd_configs:
        - role: endpoints
        relabel_configs:
        - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
          action: keep
          regex: true
        - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
          action: replace
          target_label: __scheme__
          regex: (https?)
        - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
          action: replace
          target_label: __metrics_path__
          regex: (.+)
        - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
          action: replace
          target_label: __address__
          regex: ([^:]+)(?::\d+)?;(\d+)
          replacement: $1:$2
        - action: labelmap
          regex: __meta_kubernetes_service_label_(.+)
        - source_labels: [__meta_kubernetes_namespace]
          action: replace
          target_label: kubernetes_namespace
        - source_labels: [__meta_kubernetes_service_name]
          action: replace
          target_label: kubernetes_name

Deploy the service

[root@node1 ]# kubectl apply -f prometheus-deployment.yaml --namespace=monitor-system
apiVersion: apps/v1
kind: Deployment
metadata:
  name: prometheus-deployment
  namespace: monitor-system
spec:
  selector:
    matchLabels:
      app: prometheus-server
  template:
    metadata:
      labels:
        app: prometheus-server
    spec:
      containers:
        - name: prometheus
          image: prom/prometheus:latest
          imagePullPolicy: Never
          args:
            - "--config.file=/etc/prometheus/prometheus.yml"
            - "--storage.tsdb.path=/prometheus/"
          ports:
            - containerPort: 9090
          volumeMounts:
            - name: prometheus-config-volume
              mountPath: /etc/prometheus/
            - name: prometheus-storage-volume
              mountPath: /prometheus/
      volumes:
        - name: prometheus-config-volume
          configMap:
            defaultMode: 420
            name: prometheus-server-conf
        - name: prometheus-storage-volume
          emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
  name: prometheus-service
spec:
  selector:
    app: prometheus-server
  type: LoadBalancer
  ports:
    - port: 9090
      targetPort: 9090
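
Once applied, confirm that the Deployment and Service are up and note the address assigned to the LoadBalancer service (MetalLB from the previous section can provide it):

[root@node1 ]# kubectl get pods -n monitor-system
[root@node1 ]# kubectl get service prometheus-service -n monitor-system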

Deploy Helm

Install Helm

Download the Helm release tarball from the helm GitHub releases
[root@node1 ~]# tar -zxvf helm-v3.7.2-linux-amd64.tar.gz
[root@node1 ~]# mv linux-amd64/helm /usr/bin/
[root@node1 ~]# helm version

Add a chart repository

[root@node1 ]# helm repo add stable https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
[root@node1 ]# helm repo update

Create a chart

[root@node1 ]# helm create  myweb
[root@node1 ]# tree myweb/
myweb/
├── charts
├── Chart.yaml
├── templates
│   ├── deployment.yaml
│   ├── _helpers.tpl
│   ├── hpa.yaml
│   ├── ingress.yaml
│   ├── NOTES.txt
│   ├── serviceaccount.yaml
│   ├── service.yaml
│   └── tests
│       └── test-connection.yaml
└── values.yaml
[root@node1 ]# rm -rf myweb/templates/*

Built-in objects

Variable             Meaning
Release.Name         release name
Release.Time         release time
Release.Namespace    release namespace
Release.Service      name of the service rendering the release
Release.Revision     revision number
Release.IsUpgrade    set to true for an upgrade or rollback operation
Release.IsInstall    set to true for an install operation

Configure the values object

[root@node1 ]# vi myweb/values.yaml
replicaCount: 2
image: "nginx"
imageTag: "latest"
label:
   app: nginx

Configure the deployment template

[root@node1 ]# vi myweb/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ .Release.Name }}-deployment
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      app: {{ .Values.label.app }}
  template:
    metadata:
      labels:
        app: {{ .Values.label.app }}
    spec:
      containers:
      - name: nginx
        image: {{ .Values.image }}:{{ .Values.imageTag }}
        ports:
        - name: http
          containerPort: 80

---
apiVersion: v1
kind: Service
metadata:
  name: nginx
spec:
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: nginx
  type: LoadBalancer
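
Before installing, the chart can be checked and rendered locally (both are standard Helm 3 commands):

[root@node1 ]# helm lint myweb
[root@node1 ]# helm template web myweb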

Test

[root@node1 ]# helm install web myweb
[root@node1 ]# helm uninstall web #run this later to remove the release again
[root@node1 ]# helm get manifest web #prints the rendered manifest shown below (run while the release is installed)
---
# Source: myweb/templates/deployment.yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx
spec:
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: nginx
  type: LoadBalancer
---
# Source: myweb/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web-deployment
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:latest
        ports:
        - name: http
          containerPort: 80