Fixing the K8s scheduler and controller-manager error: dial tcp 127.0.0.1:10252: connect: connection refused
After installing Kubernetes with kubeadm, the scheduler and controller-manager components sometimes report "dial tcp 127.0.0.1:10252: connect: connection refused". A typical example:
[root@k8s-master102 yaml]# kubectl get pod,cs -n kube-system
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                                           READY   STATUS    RESTARTS   AGE
pod/calico-kube-controllers-6d9cdcd744-ppwz8   1/1     Running   3          2d17h
pod/calico-node-7sppg                          1/1     Running   1          45h
pod/calico-node-rfq2d                          1/1     Running   0          40h
pod/calico-node-stz7t                          1/1     Running   0          45h
pod/coredns-74ff55c5b-mq88r                    1/1     Running   2          2d17h
pod/coredns-74ff55c5b-w5p2b                    1/1     Running   3          2d17h
pod/etcd-k8s-master102                         1/1     Running   1          2d17h
pod/kube-apiserver-k8s-master102               1/1     Running   1          2d17h
pod/kube-controller-manager-k8s-master102      1/1     Running   1          2d17h
pod/kube-proxy-dl64v                           1/1     Running   2          2d17h
pod/kube-proxy-gqhqw                           1/1     Running   3          2d17h
pod/kube-proxy-wl57r                           1/1     Running   1          2d17h
pod/kube-scheduler-k8s-master102               1/1     Running   1          2d17h

NAME                                 STATUS      MESSAGE                                                                                       ERROR
componentstatus/controller-manager   Unhealthy   Get "http://127.0.0.1:10252/healthz": dial tcp 127.0.0.1:10252: connect: connection refused
componentstatus/scheduler            Unhealthy   Get "http://127.0.0.1:10251/healthz": dial tcp 127.0.0.1:10251: connect: connection refused
componentstatus/etcd-0               Healthy     {"health":"true"}
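
You can reproduce the failing check by hand: the componentstatus probe goes to the insecure HTTP ports 10251/10252 on localhost, and nothing is listening there. A quick check on the master (assuming curl and ss are available):

curl http://127.0.0.1:10252/healthz   # controller-manager insecure port: connection refused
curl http://127.0.0.1:10251/healthz   # scheduler insecure port: connection refused
ss -lntp | grep -E '10251|10252'      # prints nothing: no listener on either port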
Pods that are already running may then show strange behavior such as intermittent access failures (although some of those oddities may have other causes). The root cause is that the kube-controller-manager.yaml and kube-scheduler.yaml manifests generated by kubeadm init set --port=0, which disables the insecure health-check ports (10252 and 10251) that componentstatus probes. The fix is simply to comment out that flag in both files; this must be done on every master node. Both files live in the /etc/kubernetes/manifests/ directory by default.
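
If you prefer not to edit both files by hand, a sed one-liner along the following lines can comment the flag out (a sketch only; it assumes GNU sed and the stock kubeadm indentation, and it writes .bak backups first). Run it on every master node:

# Comment out the --port=0 line in both static pod manifests, keeping .bak backups.
sed -i.bak 's/^\( *\)- --port=0/\1# - --port=0/' \
  /etc/kubernetes/manifests/kube-controller-manager.yaml \
  /etc/kubernetes/manifests/kube-scheduler.yaml

# Verify the flag is now commented out in both files.
grep -n 'port=0' /etc/kubernetes/manifests/kube-controller-manager.yaml \
                 /etc/kubernetes/manifests/kube-scheduler.yaml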
Solution steps
Modify the corresponding configuration files
- Modify kube-controller-manager.yaml
- Edit the file: vim /etc/kubernetes/manifests/kube-controller-manager.yaml
- Find the --port=0 entry under spec.containers.command and prepend # to comment that line out
- File contents after modification:
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    component: kube-controller-manager
    tier: control-plane
  name: kube-controller-manager
  namespace: kube-system
spec:
  containers:
  - command:
    - kube-controller-manager
    - --authentication-kubeconfig=/etc/kubernetes/controller-manager.conf
    - --authorization-kubeconfig=/etc/kubernetes/controller-manager.conf
    - --bind-address=127.0.0.1
    - --client-ca-file=/etc/kubernetes/pki/ca.crt
    - --cluster-name=kubernetes
    - --cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt
    - --cluster-signing-key-file=/etc/kubernetes/pki/ca.key
    - --controllers=*,bootstrapsigner,tokencleaner
    - --kubeconfig=/etc/kubernetes/controller-manager.conf
    - --leader-elect=true
    # - --port=0
    - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
    - --root-ca-file=/etc/kubernetes/pki/ca.crt
    - --service-account-private-key-file=/etc/kubernetes/pki/sa.key
    - --use-service-account-credentials=true
    image: k8s.gcr.io/kube-controller-manager:v1.20.15
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 8
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10257
        scheme: HTTPS
      initialDelaySeconds: 10
      periodSeconds: 10
      timeoutSeconds: 15
    name: kube-controller-manager
    resources:
      requests:
        cpu: 200m
    startupProbe:
      failureThreshold: 24
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10257
        scheme: HTTPS
      initialDelaySeconds: 10
      periodSeconds: 10
      timeoutSeconds: 15
    volumeMounts:
    - mountPath: /etc/ssl/certs
      name: ca-certs
      readOnly: true
    - mountPath: /etc/pki
      name: etc-pki
      readOnly: true
    - mountPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
      name: flexvolume-dir
    - mountPath: /etc/kubernetes/pki
      name: k8s-certs
      readOnly: true
    - mountPath: /etc/kubernetes/controller-manager.conf
      name: kubeconfig
      readOnly: true
  hostNetwork: true
  priorityClassName: system-node-critical
  volumes:
  - hostPath:
      path: /etc/ssl/certs
      type: DirectoryOrCreate
    name: ca-certs
  - hostPath:
      path: /etc/pki
      type: DirectoryOrCreate
    name: etc-pki
  - hostPath:
      path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
      type: DirectoryOrCreate
    name: flexvolume-dir
  - hostPath:
      path: /etc/kubernetes/pki
      type: DirectoryOrCreate
    name: k8s-certs
  - hostPath:
      path: /etc/kubernetes/controller-manager.conf
      type: FileOrCreate
    name: kubeconfig
status: {}
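
Once kubelet recreates the controller-manager static pod from this manifest, the insecure health endpoint comes back on its default port 10252, which is exactly what componentstatus probes. A quick sanity check (assuming curl is available on the master):

curl http://127.0.0.1:10252/healthz   # should now print: ok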
- Modify kube-scheduler.yaml
- Edit the file: vim /etc/kubernetes/manifests/kube-scheduler.yaml
- Find the --port=0 entry under spec.containers.command and prepend # to comment that line out
- File contents after modification:
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    component: kube-scheduler
    tier: control-plane
  name: kube-scheduler
  namespace: kube-system
spec:
  containers:
  - command:
    - kube-scheduler
    - --authentication-kubeconfig=/etc/kubernetes/scheduler.conf
    - --authorization-kubeconfig=/etc/kubernetes/scheduler.conf
    - --bind-address=127.0.0.1
    - --kubeconfig=/etc/kubernetes/scheduler.conf
    - --leader-elect=true
    # - --port=0
    image: k8s.gcr.io/kube-scheduler:v1.20.15
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 8
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10259
        scheme: HTTPS
      initialDelaySeconds: 10
      periodSeconds: 10
      timeoutSeconds: 15
    name: kube-scheduler
    resources:
      requests:
        cpu: 100m
    startupProbe:
      failureThreshold: 24
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10259
        scheme: HTTPS
      initialDelaySeconds: 10
      periodSeconds: 10
      timeoutSeconds: 15
    volumeMounts:
    - mountPath: /etc/kubernetes/scheduler.conf
      name: kubeconfig
      readOnly: true
  hostNetwork: true
  priorityClassName: system-node-critical
  volumes:
  - hostPath:
      path: /etc/kubernetes/scheduler.conf
      type: FileOrCreate
    name: kubeconfig
status: {}
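
Likewise, after the scheduler static pod is recreated, its insecure health endpoint answers on the default port 10251 again (again assuming curl on the master):

curl http://127.0.0.1:10251/healthz   # should now print: ok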
Restart the service
Run the command:
systemctl restart kubelet
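
kubelet also watches /etc/kubernetes/manifests and normally recreates the static pods on its own when the files change; restarting it just forces the resync. To watch the control-plane pods come back up, an optional check using the tier=control-plane label from the manifests above:

kubectl -n kube-system get pods -w -l tier=control-plane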
Check the status again
[root@k8s-master102 yaml]# kubectl get pod,cs -n kube-system
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                                           READY   STATUS    RESTARTS   AGE
pod/calico-kube-controllers-6d9cdcd744-ppwz8   1/1     Running   3          2d17h
pod/calico-node-7sppg                          1/1     Running   1          45h
pod/calico-node-rfq2d                          1/1     Running   0          40h
pod/calico-node-stz7t                          1/1     Running   0          45h
pod/coredns-74ff55c5b-mq88r                    1/1     Running   2          2d17h
pod/coredns-74ff55c5b-w5p2b                    1/1     Running   3          2d17h
pod/etcd-k8s-master102                         0/1     Running   1          2d17h
pod/kube-apiserver-k8s-master102               1/1     Running   1          2d17h
pod/kube-controller-manager-k8s-master102      0/1     Running   0          32s
pod/kube-proxy-dl64v                           1/1     Running   2          2d17h
pod/kube-proxy-gqhqw                           1/1     Running   3          2d17h
pod/kube-proxy-wl57r                           1/1     Running   1          2d17h
pod/kube-scheduler-k8s-master102               0/1     Running   0          67s

NAME                                 STATUS    MESSAGE             ERROR
componentstatus/scheduler            Healthy   ok
componentstatus/controller-manager   Healthy   ok
componentstatus/etcd-0               Healthy   {"health":"true"}