Metrics Server Configuration
Problem Introduction
While learning Kubernetes, I used kubectl top to check resource usage and found that the command did not work. Running kubectl top -h shows the hint: "This command requires Metrics Server to be correctly configured and working on the server." In other words, Metrics Server has to be installed and healthy in the cluster, so the following are the steps for installing the Metrics Server component.
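Before installing anything, you can confirm that the Metrics API really is missing. A quick check (v1beta1.metrics.k8s.io is the standard APIService name registered by Metrics Server):
kubectl get apiservices | grep metrics.k8s.io
kubectl get --raw "/apis/metrics.k8s.io/v1beta1/nodes"
Before the installation the first command returns nothing and the second fails; both should succeed once the steps below are finished.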
Configuration Steps
Download the required YAML files
git clone https://github.com/kubernetes/kubernetes.git
cd ./kubernetes/cluster/addons/metrics-server
vim metrics-server-deployment.yaml
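The addon manifests under cluster/addons/metrics-server differ between branches, and master may no longer ship v0.5.2. As a sketch (release-1.23 is an assumption here; pick the branch that matches your cluster version), a shallow clone of a release branch keeps the download small and the manifest version predictable:
git clone -b release-1.23 --depth 1 https://github.com/kubernetes/kubernetes.git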
Pay attention to the places marked with "# modified" below.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: metrics-server-config
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  NannyConfiguration: |-
    apiVersion: nannyconfig/v1alpha1
    kind: NannyConfiguration
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: metrics-server-v0.5.2
  namespace: kube-system
  labels:
    k8s-app: metrics-server
    addonmanager.kubernetes.io/mode: Reconcile
    version: v0.5.2
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
      version: v0.5.2
  template:
    metadata:
      name: metrics-server
      labels:
        k8s-app: metrics-server
        version: v0.5.2
    spec:
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      priorityClassName: system-cluster-critical
      serviceAccountName: metrics-server
      nodeSelector:
        kubernetes.io/os: linux
      containers:
      - name: metrics-server
        image: registry.aliyuncs.com/google_containers/metrics-server:v0.5.2  # modified
        command:
        - /metrics-server
        - --metric-resolution=30s
        - --kubelet-use-node-status-port
        - --kubelet-insecure-tls
        - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP
        - --cert-dir=/tmp
        - --secure-port=10250
        ports:
        - containerPort: 10250
          name: https
          protocol: TCP
        readinessProbe:
          httpGet:
            path: /readyz
            port: https
            scheme: HTTPS
          periodSeconds: 10
          failureThreshold: 3
        livenessProbe:
          httpGet:
            path: /livez
            port: https
            scheme: HTTPS
          periodSeconds: 10
          failureThreshold: 3
        volumeMounts:
        - mountPath: /tmp
          name: tmp-dir
      - name: metrics-server-nanny
        image: edwordmr/addon-resizer  # modified
        resources:
          limits:
            cpu: 100m
            memory: 300Mi
          requests:
            cpu: 5m
            memory: 50Mi
        env:
        - name: MY_POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: MY_POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: metrics-server-config-volume
          mountPath: /etc/config
        command:
        - /pod_nanny
        - --config-dir=/etc/config
        - --cpu=80m            # modified
        - --extra-cpu=0.5m
        - --memory=80Mi        # modified
        - --extra-memory=8Mi   # modified
        - --threshold=5
        - --deployment=metrics-server-v0.5.2
        - --container=metrics-server
        - --poll-period=30000
        - --estimator=exponential
        # Specifies the smallest cluster (defined in number of nodes)
        # resources will be scaled to.
        # - --minClusterSize={{ metrics_server_min_cluster_size }}  # modified
        # Use kube-apiserver metrics to avoid periodically listing nodes.
        - --use-metrics=true
      volumes:
      - name: metrics-server-config-volume
        configMap:
          name: metrics-server-config
      - emptyDir: {}
        name: tmp-dir
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
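The two image lines above were switched from the default registry to mirrors, so it is worth confirming that every node can actually pull them before applying the manifests. A minimal check, assuming Docker is the container runtime (with containerd, crictl pull works the same way):
docker pull registry.aliyuncs.com/google_containers/metrics-server:v0.5.2
docker pull edwordmr/addon-resizer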
Run kubectl apply -f . in the metrics-server directory:
[root@k8s-master metrics-server]# kubectl apply -f .
clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created
serviceaccount/metrics-server created
configmap/metrics-server-config created
deployment.apps/metrics-server-v0.5.2 created
service/metrics-server created
clusterrole.rbac.authorization.k8s.io/system:metrics-server created
clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
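Optionally, wait for the Deployment to finish rolling out before inspecting the pods (a standard kubectl command, not part of the original steps):
kubectl -n kube-system rollout status deployment/metrics-server-v0.5.2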
[root@k8s-master metrics-server]# kubectl get pod -n kube-system
NAME                                     READY   STATUS    RESTARTS       AGE
coredns-6d8c4cb4d-4h42g                  1/1     Running   5 (16h ago)    6d16h
coredns-6d8c4cb4d-hfgsc                  1/1     Running   8 (12h ago)    6d16h
etcd-k8s-master                          1/1     Running   9 (12h ago)    6d16h
kube-apiserver-k8s-master                1/1     Running   9 (12h ago)    17h
kube-controller-manager-k8s-master       1/1     Running   4 (12h ago)    17h
kube-flannel-ds-4db68                    1/1     Running   8 (16h ago)    6d16h
kube-flannel-ds-gpbl2                    1/1     Running   7 (16h ago)    6d16h
kube-flannel-ds-t9tl8                    1/1     Running   11 (12h ago)   6d16h
kube-proxy-8s9m2                         1/1     Running   5 (16h ago)    6d16h
kube-proxy-8vxhp                         1/1     Running   9 (12h ago)    6d16h
kube-proxy-tshbr                         1/1     Running   5 (16h ago)    6d16h
kube-scheduler-k8s-master                1/1     Running   10 (12h ago)   6d16h
metrics-server-v0.5.2-7f45b6cf57-4bzg9   2/2     Running   0              75s
[root@k8s-master metrics-server]# kubectl get apiservices
NAME                                   SERVICE                      AVAILABLE   AGE
v1.                                    Local                        True        6d16h
v1.admissionregistration.k8s.io        Local                        True        6d16h
v1.apiextensions.k8s.io                Local                        True        6d16h
v1.apps                                Local                        True        6d16h
v1.authentication.k8s.io               Local                        True        6d16h
v1.authorization.k8s.io                Local                        True        6d16h
v1.autoscaling                         Local                        True        6d16h
v1.batch                               Local                        True        6d16h
v1.certificates.k8s.io                 Local                        True        6d16h
v1.coordination.k8s.io                 Local                        True        6d16h
v1.discovery.k8s.io                    Local                        True        6d16h
v1.events.k8s.io                       Local                        True        6d16h
v1.networking.k8s.io                   Local                        True        6d16h
v1.node.k8s.io                         Local                        True        6d16h
v1.policy                              Local                        True        6d16h
v1.rbac.authorization.k8s.io           Local                        True        6d16h
v1.scheduling.k8s.io                   Local                        True        6d16h
v1.storage.k8s.io                      Local                        True        6d16h
v1beta1.batch                          Local                        True        6d16h
v1beta1.discovery.k8s.io               Local                        True        6d16h
v1beta1.events.k8s.io                  Local                        True        6d16h
v1beta1.flowcontrol.apiserver.k8s.io   Local                        True        6d16h
v1beta1.metrics.k8s.io                 kube-system/metrics-server   True        103s
v1beta1.node.k8s.io                    Local                        True        6d16h
v1beta1.policy                         Local                        True        6d16h
v1beta1.storage.k8s.io                 Local                        True        6d16h
v1beta2.flowcontrol.apiserver.k8s.io   Local                        True        6d16h
v2.autoscaling                         Local                        True        6d16h
v2beta1.autoscaling                    Local                        True        6d16h
v2beta2.autoscaling                    Local                        True        6d16h
[root@k8s-master metrics-server]# kubectl top node
NAME         CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
k8s-master   331m         8%     1518Mi          19%
k8s-node1    91m          2%     569Mi           7%
k8s-node2    139m         3%     634Mi           8%
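With the v1beta1.metrics.k8s.io APIService showing Available=True and kubectl top node returning data, the installation is working. Two optional follow-up checks (both standard kubectl commands) are pod-level metrics and a direct query against the Metrics API:
kubectl top pod -n kube-system
kubectl get --raw "/apis/metrics.k8s.io/v1beta1/nodes"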