246. Deploying Exporters to the k8s Cluster
1. Deploying kube-state-metrics
1.1 Prepare the base image
Official kube-state-metrics repository on quay.io: https://quay.io/repository/coreos/kube-state-metrics?tab=info
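The tag command below references the image by its local ID (91599517197a), so the image must already be present on the host; if it is not, it can be pulled from quay.io first (a minimal sketch, assuming the v1.5.0 tag corresponds to that ID):
[root@k8s7-200.host.com /opt/src]# docker pull quay.io/coreos/kube-state-metrics:v1.5.0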
[root@k8s7-200.host.com /opt/src]# docker image tag 91599517197a harbor.od.com/public/kube-state-metrics:v1.5.0
[root@k8s7-200.host.com /opt/src]# docker push harbor.od.com/public/kube-state-metrics:v1.5.0
1.2 Prepare the resource manifests
1. rbac.yaml
[root@k8s7-200.host.com /data/k8s-yaml/kube-state-metrics]# cat rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
  name: kube-state-metrics
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
  name: kube-state-metrics
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  - secrets
  - nodes
  - pods
  - services
  - resourcequotas
  - replicationcontrollers
  - limitranges
  - persistentvolumeclaims
  - persistentvolumes
  - namespaces
  - endpoints
  verbs:
  - list
  - watch
- apiGroups:
  - extensions
  resources:
  - daemonsets
  - deployments
  - replicasets
  verbs:
  - list
  - watch
- apiGroups:
  - apps
  resources:
  - statefulsets
  verbs:
  - list
  - watch
- apiGroups:
  - batch
  resources:
  - cronjobs
  - jobs
  verbs:
  - list
  - watch
- apiGroups:
  - autoscaling
  resources:
  - horizontalpodautoscalers
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
  name: kube-state-metrics
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kube-state-metrics
subjects:
- kind: ServiceAccount
  name: kube-state-metrics
  namespace: kube-system
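Once these manifests are applied (section 1.3), an optional sanity check with kubectl auth can-i confirms that the ServiceAccount actually received the permissions the exporter needs, for example:
[root@k8s7-22.host.com ~]# kubectl auth can-i list pods --as=system:serviceaccount:kube-system:kube-state-metrics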
2. deployment.yaml
[root@k8s7-200.host.com /data/k8s-yaml/kube-state-metrics]# cat deployment.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "2"
  labels:
    grafanak8sapp: "true"
    app: kube-state-metrics
  name: kube-state-metrics
  namespace: kube-system
spec:
  selector:
    matchLabels:
      grafanak8sapp: "true"
      app: kube-state-metrics
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        grafanak8sapp: "true"
        app: kube-state-metrics
    spec:
      containers:
      - image: harbor.od.com/public/kube-state-metrics:v1.5.0
        name: kube-state-metrics
        ports:
        - containerPort: 8080
          name: http-metrics
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /healthz
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 5
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 5
        imagePullPolicy: IfNotPresent
      imagePullSecrets:
      - name: harbor
      restartPolicy: Always
      serviceAccount: kube-state-metrics
      serviceAccountName: kube-state-metrics
1.3 Apply the resource manifests
[root@k8s7-22.host.com ~]# kubectl apply -f http://k8s-yaml.od.com/kube-state-metrics/rbac.yaml
serviceaccount/kube-state-metrics created
clusterrole.rbac.authorization.k8s.io/kube-state-metrics created
clusterrolebinding.rbac.authorization.k8s.io/kube-state-metrics created
[root@k8s7-22.host.com ~]# kubectl apply -f http://k8s-yaml.od.com/kube-state-metrics/deployment.yaml
deployment.extensions/kube-state-metrics created
1.4 Verify
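The health check below targets the pod IP directly. The IP (172.7.21.16 here) will differ per cluster and can be looked up via the pod labels set in the Deployment:
[root@k8s7-22.host.com ~]# kubectl get pods -n kube-system -l app=kube-state-metrics -o wide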
[root@k8s7-22.host.com ~]# curl 172.7.21.16:8080/healthz
ok
2. Deploying node-exporter
2.1 Prepare the base image
[root@k8s7-200.host.com /opt/src]# docker load -i node-exporter-v0.15.0.tar
[root@k8s7-200.host.com /opt/src]# docker image tag 12d51ffa2b22 harbor.od.com/public/node-exporter:v0.15.0
[root@k8s7-200.host.com /opt/src]# docker push harbor.od.com/public/node-exporter:v0.15.0
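The tarball is assumed to contain the upstream prom/node-exporter image; without it, the same version can be pulled from Docker Hub instead:
[root@k8s7-200.host.com /opt/src]# docker pull prom/node-exporter:v0.15.0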
2.2 Prepare the resource manifests
[root@k8s7-200.host.com /data/k8s-yaml/node-exporter]# cat daemonset.yaml
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
  name: node-exporter
  namespace: kube-system
  labels:
    daemon: "node-exporter"
    grafanak8sapp: "true"
spec:
  selector:
    matchLabels:
      daemon: "node-exporter"
      grafanak8sapp: "true"
  template:
    metadata:
      name: node-exporter
      labels:
        daemon: "node-exporter"
        grafanak8sapp: "true"
    spec:
      volumes:
      - name: proc
        hostPath:
          path: /proc
          type: ""
      - name: sys
        hostPath:
          path: /sys
          type: ""
      containers:
      - name: node-exporter
        image: harbor.od.com/public/node-exporter:v0.15.0
        args:
        - --path.procfs=/host_proc
        - --path.sysfs=/host_sys
        ports:
        - name: node-exporter
          hostPort: 9100
          containerPort: 9100
          protocol: TCP
        volumeMounts:
        - name: sys
          readOnly: true
          mountPath: /host_sys
        - name: proc
          readOnly: true
          mountPath: /host_proc
      imagePullSecrets:
      - name: harbor
      restartPolicy: Always
      hostNetwork: true
2.3 Apply the resource manifests
[root@k8s7-22.host.com ~]# kubectl apply -f http://k8s-yaml.od.com/node-exporter/daemonset.yaml
daemonset.extensions/node-exporter created
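Because the DaemonSet uses hostNetwork: true and points node_exporter at the host's /proc and /sys through --path.procfs and --path.sysfs, each node serves its own metrics on port 9100 of the node IP. A quick rollout check:
[root@k8s7-22.host.com ~]# kubectl get ds node-exporter -n kube-system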
2.4 Verify
[root@k8s7-21.host.com ~]# netstat -lntup|grep 9100
tcp6 0 0 :::9100 :::* LISTEN 84172/node_exporter
[root@k8s7-21.host.com ~]# kubectl get pods -n kube-system -l daemon="node-exporter" -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
node-exporter-kbkt4 1/1 Running 0 2m46s 10.4.7.22 k8s7-22.host.com <none> <none>
node-exporter-p7gsp 1/1 Running 0 2m46s 10.4.7.21 k8s7-21.host.com <none> <none>
[root@k8s7-21.host.com ~]# curl -s 10.4.7.21:9100/metrics |head
# HELP go_gc_duration_seconds A summary of the GC invocation durations.
# TYPE go_gc_duration_seconds summary
go_gc_duration_seconds{quantile="0"} 0
go_gc_duration_seconds{quantile="0.25"} 0
go_gc_duration_seconds{quantile="0.5"} 0
go_gc_duration_seconds{quantile="0.75"} 0
go_gc_duration_seconds{quantile="1"} 0
go_gc_duration_seconds_sum 0
go_gc_duration_seconds_count 0
# HELP go_goroutines Number of goroutines that currently exist.
3. Deploying cadvisor
3.1 Prepare the base image
[root@k8s7-200.host.com /opt/src]# docker load -i cadvisor-v0.28.3.tar
[root@k8s7-200.host.com /opt/src]# docker image tag 75f88e3ec333 harbor.od.com/public/cadvisor:v0.28.3
[root@k8s7-200.host.com /opt/src]# docker push harbor.od.com/public/cadvisor:v0.28.3
3.2 Prepare the resource manifests
[root@k8s7-200.host.com /data/k8s-yaml/cadvisor]# cat daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: cadvisor
  namespace: kube-system
  labels:
    app: cadvisor
spec:
  selector:
    matchLabels:
      name: cadvisor
  template:
    metadata:
      labels:
        name: cadvisor
    spec:
      hostNetwork: true
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      containers:
      - name: cadvisor
        image: harbor.od.com/public/cadvisor:v0.28.3
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - name: rootfs
          mountPath: /rootfs
          readOnly: true
        - name: var-run
          mountPath: /var/run
        - name: sys
          mountPath: /sys
          readOnly: true
        - name: docker
          mountPath: /var/lib/docker
          readOnly: true
        ports:
        - name: http
          containerPort: 4194
          protocol: TCP
        readinessProbe:
          tcpSocket:
            port: 4194
          initialDelaySeconds: 5
          periodSeconds: 10
        args:
        - --housekeeping_interval=10s
        - --port=4194
      terminationGracePeriodSeconds: 30
      volumes:
      - name: rootfs
        hostPath:
          path: /
      - name: var-run
        hostPath:
          path: /var/run
      - name: sys
        hostPath:
          path: /sys
      - name: docker
        hostPath:
          path: /data/docker
Side note:
tolerations:
- key: node-role.kubernetes.io/master
  effect: NoSchedule
This is a taint toleration: a pod carrying it may be scheduled onto nodes that have the matching taint.
Add a taint:
[root@k8s7-22.host.com ~]# kubectl taint node k8s7-21.host.com node-role.kubernetes.io/master=master:NoSchedule
node/k8s7-21.host.com tainted
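To confirm the taint landed (or to inspect a node's taints at any time):
[root@k8s7-22.host.com ~]# kubectl describe node k8s7-21.host.com | grep -i taint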
3.3 Fix the cgroup symlink on the worker nodes
cadvisor v0.28.x looks up the cgroup hierarchy as cpuacct,cpu, while CentOS 7 mounts it as cpu,cpuacct, so a compatibility symlink is created on every node that runs cadvisor (shown here on k8s7-21):
[root@k8s7-21.host.com ~]# mount -o remount,rw /sys/fs/cgroup/
[root@k8s7-21.host.com ~]# ln -s /sys/fs/cgroup/cpu,cpuacct /sys/fs/cgroup/cpuacct,cpu
[root@k8s7-21.host.com ~]# ll /sys/fs/cgroup/ | grep cpu
lrwxrwxrwx 1 root root 11 Jan 28 22:41 cpu -> cpu,cpuacct
lrwxrwxrwx 1 root root 11 Jan 28 22:41 cpuacct -> cpu,cpuacct
lrwxrwxrwx 1 root root 27 May 5 11:15 cpuacct,cpu -> /sys/fs/cgroup/cpu,cpuacct/
drwxr-xr-x 8 root root 0 Apr 26 11:06 cpu,cpuacct
drwxr-xr-x 7 root root 0 Jan 28 22:41 cpuset
Remove the taint:
[root@k8s7-21.host.com ~]# kubectl taint node k8s7-21.host.com node-role.kubernetes.io/master-
3.4 Apply the resource manifests
[root@k8s7-22.host.com ~]# kubectl apply -f http://k8s-yaml.od.com/cadvisor/daemonset.yaml
daemonset.apps/cadvisor created
3.5 Verify
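A minimal check, analogous to the node-exporter verification: list the pods, then curl the metrics endpoint on a node IP (port 4194 comes from the --port argument above):
[root@k8s7-22.host.com ~]# kubectl get pods -n kube-system -l name=cadvisor -o wide
[root@k8s7-21.host.com ~]# curl -s 10.4.7.21:4194/metrics | head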
4. Deploying blackbox-exporter
4.1 Prepare the base image
https://hub.docker.com/r/prom/blackbox-exporter
https://github.com/prometheus/blackbox_exporter
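The image ID 81b70b6158be used below is assumed to come from pulling the Docker Hub image linked above; if it is not present locally yet:
[root@k8s7-200.host.com /opt/src]# docker pull prom/blackbox-exporter:v0.15.1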
[root@k8s7-200.host.com /opt/src]# docker image tag 81b70b6158be harbor.od.com/public/blackbox-exporter:v0.15.1
[root@k8s7-200.host.com /opt/src]# docker push harbor.od.com/public/blackbox-exporter:v0.15.1
4.2 Prepare the resource manifests
[root@k8s7-200.host.com /data/k8s-yaml/blackbox-exporter]# cat configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  labels:
    app: blackbox-exporter
  name: blackbox-exporter
  namespace: kube-system
data:
  blackbox.yml: |-
    modules:
      http_2xx:
        prober: http
        timeout: 2s
        http:
          valid_http_versions: ["HTTP/1.1", "HTTP/2"]
          valid_status_codes: [200,301,302]
          method: GET
          preferred_ip_protocol: "ip4"
      tcp_connect:
        prober: tcp
        timeout: 2s
[root@k8s7-200.host.com /data/k8s-yaml/blackbox-exporter]# cat deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: blackbox-exporter
  namespace: kube-system
  labels:
    app: blackbox-exporter
  annotations:
    deployment.kubernetes.io/revision: "1"
spec:
  replicas: 1
  selector:
    matchLabels:
      app: blackbox-exporter
  template:
    metadata:
      labels:
        app: blackbox-exporter
    spec:
      volumes:
      - name: config
        configMap:
          name: blackbox-exporter
          defaultMode: 420
      containers:
      - name: blackbox-exporter
        image: harbor.od.com/public/blackbox-exporter:v0.15.1
        imagePullPolicy: IfNotPresent
        args:
        - --config.file=/etc/blackbox_exporter/blackbox.yml
        - --log.level=info
        - --web.listen-address=:9115
        ports:
        - name: blackbox-port
          containerPort: 9115
          protocol: TCP
        resources:
          limits:
            cpu: 200m
            memory: 256Mi
          requests:
            cpu: 100m
            memory: 50Mi
        volumeMounts:
        - name: config
          mountPath: /etc/blackbox_exporter
        readinessProbe:
          tcpSocket:
            port: 9115
          initialDelaySeconds: 5
          timeoutSeconds: 5
          periodSeconds: 10
          successThreshold: 1
          failureThreshold: 3
[root@k8s7-200.host.com /data/k8s-yaml/blackbox-exporter]# cat service.yaml
# targetPort is omitted: it defaults to the same value as port (9115), which is the container port named blackbox-port in the Pod
apiVersion: v1
kind: Service
metadata:
  name: blackbox-exporter
  namespace: kube-system
spec:
  selector:
    app: blackbox-exporter
  ports:
  - name: blackbox-port
    protocol: TCP
    port: 9115
[root@k8s7-200.host.com /data/k8s-yaml/blackbox-exporter]# cat ingress.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: blackbox-exporter
  namespace: kube-system
spec:
  rules:
  - host: blackbox.od.com
    http:
      paths:
      - path: /
        backend:
          serviceName: blackbox-exporter
          servicePort: blackbox-port
4.3 Apply the resource manifests
[root@k8s7-22.host.com ~]# kubectl apply -f http://k8s-yaml.od.com/blackbox-exporter/configmap.yaml
configmap/blackbox-exporter created
[root@k8s7-22.host.com ~]# kubectl apply -f http://k8s-yaml.od.com/blackbox-exporter/deployment.yaml
deployment.apps/blackbox-exporter created
[root@k8s7-22.host.com ~]# kubectl apply -f http://k8s-yaml.od.com/blackbox-exporter/service.yaml
service/blackbox-exporter created
[root@k8s7-22.host.com ~]# kubectl apply -f http://k8s-yaml.od.com/blackbox-exporter/ingress.yaml
ingress.extensions/blackbox-exporter created
4.4 DNS record
[root@k8s7-11.host.com ~]# tail /var/named/od.com.zone
blackbox A 10.4.7.10
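After adding the record (and bumping the zone serial, as with any zone change), reload named and confirm the name resolves. The commands below assume bind runs on k8s7-11 (10.4.7.11), as elsewhere in this series:
[root@k8s7-11.host.com ~]# systemctl restart named
[root@k8s7-11.host.com ~]# dig -t A blackbox.od.com @10.4.7.11 +short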
4.5 Verify
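With the ingress and DNS record in place, the blackbox-exporter UI should be reachable at http://blackbox.od.com. A probe can also be triggered directly through the /probe endpoint, selecting one of the modules defined in the ConfigMap (the target below is just a placeholder):
[root@k8s7-22.host.com ~]# curl 'http://blackbox.od.com/probe?module=http_2xx&target=www.baidu.com'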