安装部署Dashboard

查看pod运行情况
[root@k8s01 ~]# kubectl get pods -A  -o wide
NAMESPACE              NAME                                         READY   STATUS    RESTARTS   AGE     IP               NODE    NOMINATED NODE   READINESS GATES
default                tomcat-app-65f67f675b-fzvgm                  1/1     Running   0          3h15m   10.244.235.129   k8s03   <none>           <none>
default                tomcat-app-65f67f675b-qm4gk                  1/1     Running   0          3h15m   10.244.77.1      k8s04   <none>           <none>
kube-system            calico-kube-controllers-5dc87d545c-v8j89     1/1     Running   9          4h11m   10.244.236.129   k8s02   <none>           <none>
kube-system            calico-node-fr825                            1/1     Running   0          3h35m   192.168.8.84     k8s04   <none>           <none>
kube-system            calico-node-jhhtq                            1/1     Running   0          4h11m   192.168.8.82     k8s02   <none>           <none>
kube-system            calico-node-m2wtx                            1/1     Running   0          3h35m   192.168.8.85     k8s05   <none>           <none>
kube-system            calico-node-r6jrl                            1/1     Running   0          3h36m   192.168.8.83     k8s03   <none>           <none>
kube-system            calico-node-r7xht                            1/1     Running   0          4h11m   192.168.8.81     k8s01   <none>           <none>
kube-system            coredns-6d56c8448f-hrnkp                     1/1     Running   0          4h37m   10.244.73.65     k8s01   <none>           <none>
kube-system            coredns-6d56c8448f-nwbhx                     1/1     Running   0          4h37m   10.244.236.130   k8s02   <none>           <none>
kube-system            etcd-k8s01                                   1/1     Running   0          4h37m   192.168.8.81     k8s01   <none>           <none>
kube-system            kube-apiserver-k8s01                         1/1     Running   0          4h37m   192.168.8.81     k8s01   <none>           <none>
kube-system            kube-controller-manager-k8s01                1/1     Running   0          4h37m   192.168.8.81     k8s01   <none>           <none>
kube-system            kube-proxy-42rtf                             1/1     Running   0          3h36m   192.168.8.83     k8s03   <none>           <none>
kube-system            kube-proxy-djjp8                             1/1     Running   0          3h35m   192.168.8.84     k8s04   <none>           <none>
kube-system            kube-proxy-fb4n2                             1/1     Running   0          4h37m   192.168.8.81     k8s01   <none>           <none>
kube-system            kube-proxy-fgrn8                             1/1     Running   0          4h30m   192.168.8.82     k8s02   <none>           <none>
kube-system            kube-proxy-jfzdx                             1/1     Running   0          3h35m   192.168.8.85     k8s05   <none>           <none>
kube-system            kube-scheduler-k8s01                         1/1     Running   0          4h37m   192.168.8.81     k8s01   <none>           <none>
kube-system            kuboard-74c645f5df-kbpvz                     1/1     Running   0          174m    10.244.115.65    k8s05   <none>           <none>
kube-system            metrics-server-7dbf6c4558-fk67j              1/1     Running   0          140m    192.168.8.85     k8s05   <none>           <none>
下载yaml文件(参看附件)
  1. kubernetes-dashboard.yaml
  2. kubernetes-dashboard-admin.yaml
  3. kubernetes-dashboard-admin-bind-cluster-role.yaml
修改kubernetes-dashboard.yaml文件
[root@k8s01 ~]# vim kubernetes-dashboard.yaml

需要修改的内容如下所示。

---
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  #增加
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
      #增加
      nodePort: 30000
  selector:
    k8s-app: kubernetes-dashboard
---
#因为自动生成的证书很多浏览器无法使用,所以我们自己创建,注释掉kubernetes-dashboard-certs对象声明
#apiVersion: v1
#kind: Secret
#metadata:
#  labels:
#    k8s-app: kubernetes-dashboard
#  name: kubernetes-dashboard-certs
#  namespace: kubernetes-dashboard
#type: Opaque
---
创建证书
[root@k8s01 ~]# mkdir dashboard-certs
[root@k8s01 ~]# cd dashboard-certs/

#创建命名空间
[root@k8s01 ~]# kubectl create namespace kubernetes-dashboard

# 创建key文件
[root@k8s01 ~]# openssl genrsa -out dashboard.key 2048

#证书请求
[root@k8s01 ~]# openssl req -days 36000 -new -out dashboard.csr -key dashboard.key -subj '/CN=dashboard-cert'

#自签证书
[root@k8s01 ~]# openssl x509 -req -days 36000 -in dashboard.csr -signkey dashboard.key -out dashboard.crt

#创建kubernetes-dashboard-certs对象
[root@k8s01 ~]# kubectl create secret generic kubernetes-dashboard-certs --from-file=dashboard.key --from-file=dashboard.crt -n kubernetes-dashboard
一、安装dashboard
[root@k8s01 ~]# kubectl create -f kubernetes-dashboard.yaml
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created
Error from server (AlreadyExists): error when creating "kubernetes-dashboard.yaml": namespaces "kubernetes-dashboard" already exists

注意:这里可能会报如下所示。

Error from server (AlreadyExists): error when creating "kubernetes-dashboard.yaml": namespaces "kubernetes-dashboard" already exists

这是因为我们在创建证书时,已经创建了kubernetes-dashboard命名空间,所以,直接忽略此错误信息即可。

查看安装结果
[root@k8s01 ~]# kubectl get pods -A  -o wide
NAMESPACE              NAME                                         READY   STATUS              RESTARTS   AGE     IP               NODE    NOMINATED NODE   READINESS GATES
default                tomcat-app-65f67f675b-fzvgm                  1/1     Running             0          3h2m    10.244.235.129   k8s03   <none>           <none>
default                tomcat-app-65f67f675b-qm4gk                  1/1     Running             0          3h2m    10.244.77.1      k8s04   <none>           <none>
kube-system            calico-kube-controllers-5dc87d545c-v8j89     1/1     Running             9          3h58m   10.244.236.129   k8s02   <none>           <none>
kube-system            calico-node-fr825                            1/1     Running             0          3h22m   192.168.8.84     k8s04   <none>           <none>
kube-system            calico-node-jhhtq                            1/1     Running             0          3h58m   192.168.8.82     k8s02   <none>           <none>
kube-system            calico-node-m2wtx                            1/1     Running             0          3h22m   192.168.8.85     k8s05   <none>           <none>
kube-system            calico-node-r6jrl                            1/1     Running             0          3h23m   192.168.8.83     k8s03   <none>           <none>
kube-system            calico-node-r7xht                            1/1     Running             0          3h58m   192.168.8.81     k8s01   <none>           <none>
kube-system            coredns-6d56c8448f-hrnkp                     1/1     Running             0          4h24m   10.244.73.65     k8s01   <none>           <none>
kube-system            coredns-6d56c8448f-nwbhx                     1/1     Running             0          4h24m   10.244.236.130   k8s02   <none>           <none>
kube-system            etcd-k8s01                                   1/1     Running             0          4h24m   192.168.8.81     k8s01   <none>           <none>
kube-system            kube-apiserver-k8s01                         1/1     Running             0          4h24m   192.168.8.81     k8s01   <none>           <none>
kube-system            kube-controller-manager-k8s01                1/1     Running             0          4h24m   192.168.8.81     k8s01   <none>           <none>
kube-system            kube-proxy-42rtf                             1/1     Running             0          3h23m   192.168.8.83     k8s03   <none>           <none>
kube-system            kube-proxy-djjp8                             1/1     Running             0          3h22m   192.168.8.84     k8s04   <none>           <none>
kube-system            kube-proxy-fb4n2                             1/1     Running             0          4h24m   192.168.8.81     k8s01   <none>           <none>
kube-system            kube-proxy-fgrn8                             1/1     Running             0          4h17m   192.168.8.82     k8s02   <none>           <none>
kube-system            kube-proxy-jfzdx                             1/1     Running             0          3h22m   192.168.8.85     k8s05   <none>           <none>
kube-system            kube-scheduler-k8s01                         1/1     Running             0          4h24m   192.168.8.81     k8s01   <none>           <none>
kube-system            kuboard-74c645f5df-kbpvz                     1/1     Running             0          161m    10.244.115.65    k8s05   <none>           <none>
kube-system            metrics-server-7dbf6c4558-fk67j              1/1     Running             0          127m    192.168.8.85     k8s05   <none>           <none>
kubernetes-dashboard   dashboard-metrics-scraper-7b59f7d4df-j965c   0/1     ContainerCreating   0          17s     <none>           k8s03   <none>           <none>
kubernetes-dashboard   kubernetes-dashboard-665f4c5ff-dwq4q         0/1     ContainerCreating   0          17s     <none>           k8s04   <none>           <none>
[root@k8s01 ~]# kubectl get service -n kubernetes-dashboard  -o wide
NAME                        TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)         AGE   SELECTOR
dashboard-metrics-scraper   ClusterIP   10.96.77.12    <none>        8000/TCP        63s   k8s-app=dashboard-metrics-scraper
kubernetes-dashboard        NodePort    10.97.56.175   <none>        443:30000/TCP   63s   k8s-app=kubernetes-dashboard
二、创建dashboard管理员

创建kubernetes-dashboard-admin.yaml文件。

[root@k8s01 ~]# vim kubernetes-dashboard-admin.yaml

文件的内容如下所示。

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: dashboard-admin
  namespace: kubernetes-dashboard

保存退出后执行如下命令创建管理员。

[root@k8s01 ~]# kubectl create -f kubernetes-dashboard-admin.yaml
serviceaccount/dashboard-admin created
三、为用户分配权限

创建kubernetes-dashboard-admin-bind-cluster-role.yaml文件。

[root@k8s01 ~]# vim kubernetes-dashboard-admin-bind-cluster-role.yaml

文件内容如下所示。

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: dashboard-admin-bind-cluster-role
  labels:
    k8s-app: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: dashboard-admin
  namespace: kubernetes-dashboard

保存退出后执行如下命令为用户分配权限。

[root@k8s01 ~]# kubectl create -f kubernetes-dashboard-admin-bind-cluster-role.yaml
clusterrolebinding.rbac.authorization.k8s.io/dashboard-admin-bind-cluster-role created
查看并复制用户Token

在命令行执行如下命令。

[root@k8s01 ~]# kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep dashboard-admin | awk '{print $1}')
Name:         dashboard-admin-token-6cdg7
Namespace:    kubernetes-dashboard
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: dashboard-admin
              kubernetes.io/service-account.uid: 6e4d3b59-e09f-4a68-b6fc-aa23fa24306f

Type:  kubernetes.io/service-account-token

Data
====
ca.crt:     1066 bytes
namespace:  20 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6ImkzQ0xHQ3ZnVXQxNUY4Nzg3X3RsaDluelRqMjR6MFNzSXkxTFFxRjNXZjgifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJkYXNoYm9hcmQtYWRtaW4tdG9rZW4tNmNkZzciLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGFzaGJvYXJkLWFkbWluIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiNmU0ZDNiNTktZTA5Zi00YTY4LWI2ZmMtYWEyM2ZhMjQzMDZmIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmVybmV0ZXMtZGFzaGJvYXJkOmRhc2hib2FyZC1hZG1pbiJ9.MOOMAaGlxogrsjWnRxab7g2HGtgYCPd8hg2QNnX9tcN3fGPRjBHMUBQObeYiFsm2DFkvLTp34zv4EYAdqdiFnYn6e98jg-aUpR6Bzx_bW8qyzkGA7LmXEz1gcIUXxKJD1ZWDZ7cNhlqciitQy5IhSItkBHRRuy4Ls_-8fMEPiCA26XXhOVkGmkd54l0zsjlS-A-B8OJvQtZF5hkwBAcj7CKg-fqRxgAd_xkHMp6f7UPWEF1cDK0g5-NiFOdy4gOeqyR4QQbR1nOzpdM5A8VikvANQ9KkIlS52AWbUQHZyNiKjZTP0r-NODIC6mLpRigb_yoDx1tSxvmibM7CM3QBIg

可以看到,此时的Token值为:

eyJhbGciOiJSUzI1NiIsImtpZCI6ImkzQ0xHQ3ZnVXQxNUY4Nzg3X3RsaDluelRqMjR6MFNzSXkxTFFxRjNXZjgifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJkYXNoYm9hcmQtYWRtaW4tdG9rZW4tNmNkZzciLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGFzaGJvYXJkLWFkbWluIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiNmU0ZDNiNTktZTA5Zi00YTY4LWI2ZmMtYWEyM2ZhMjQzMDZmIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmVybmV0ZXMtZGFzaGJvYXJkOmRhc2hib2FyZC1hZG1pbiJ9.MOOMAaGlxogrsjWnRxab7g2HGtgYCPd8hg2QNnX9tcN3fGPRjBHMUBQObeYiFsm2DFkvLTp34zv4EYAdqdiFnYn6e98jg-aUpR6Bzx_bW8qyzkGA7LmXEz1gcIUXxKJD1ZWDZ7cNhlqciitQy5IhSItkBHRRuy4Ls_-8fMEPiCA26XXhOVkGmkd54l0zsjlS-A-B8OJvQtZF5hkwBAcj7CKg-fqRxgAd_xkHMp6f7UPWEF1cDK0g5-NiFOdy4gOeqyR4QQbR1nOzpdM5A8VikvANQ9KkIlS52AWbUQHZyNiKjZTP0r-NODIC6mLpRigb_yoDx1tSxvmibM7CM3QBIg
查看dashboard界面

在浏览器中打开链接 https://192.168.8.81:30000 ,如下所示。

在这里插入图片描述

这里,我们选择Token方式登录,并输入在命令行获取到的Token,如下所示。

在这里插入图片描述

点击登录后进入dashboard,如下所示。

在这里插入图片描述

由于我们在《【K8S】K8s部署Metrics-Server服务》一文中安装了Metrics-Server服务,所以,我们可以查看节点服务器CPU和内存的使用情况,如下所示。

在这里插入图片描述

至此,dashboard v2.0.4安装成功。

附件

kubernetes-dashboard.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard

---

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30000
  selector:
    k8s-app: kubernetes-dashboard

---
# 因为自动生成的证书很多浏览器无法使用,所以我们自己创建,注释掉kubernetes-dashboard-certs对象声明
# apiVersion: v1
# kind: Secret
# metadata:
#   labels:
#     k8s-app: kubernetes-dashboard
#   name: kubernetes-dashboard-certs
#   namespace: kubernetes-dashboard
# type: Opaque

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf: ""

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque

---

kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard

---

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
    # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
    # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
        - name: kubernetes-dashboard
          image: kubernetesui/dashboard:v2.0.4
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
              # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
    spec:
      containers:
        - name: dashboard-metrics-scraper
          image: kubernetesui/metrics-scraper:v1.0.4
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
          - mountPath: /tmp
            name: tmp-volume
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}

kubernetes-dashboard-admin.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: dashboard-admin
  namespace: kubernetes-dashboard
kubernetes-dashboard-admin-bind-cluster-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: dashboard-admin-bind-cluster-role
  labels:
    k8s-app: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: dashboard-admin
  namespace: kubernetes-dashboard
Logo

K8S/Kubernetes社区为您提供最前沿的新闻资讯和知识内容

更多推荐