I. Install dependency packages

yum install -y epel-release wget conntrack ipvsadm ipset jq iptables curl sysstat libseccomp && /usr/sbin/modprobe ip_vs
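Note that modprobe only loads ip_vs until the next reboot. A minimal sketch for making the ipvs modules persistent (module names assume a CentOS 7 / 3.10 kernel; on kernels >= 4.19 use nf_conntrack instead of nf_conntrack_ipv4):

cat > /etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF
systemctl restart systemd-modules-load
lsmod | grep ip_vs    # verify the modules are loaded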

II. Deploy the kubelet component

1. Download and distribute the kubelet binaries

wget https://dl.k8s.io/v1.15.0/kubernetes-server-linux-amd64.tar.gz
tar -xzf kubernetes-server-linux-amd64.tar.gz -C /usr/local/
cd /usr/local/kubernetes/server/bin/ && cp -av kubelet kube-proxy /usr/local/bin/
scp  kubelet kube-proxy node1:/usr/local/bin
scp  kubelet kube-proxy node2:/usr/local/bin
scp  kubelet kube-proxy k8s-master2:/usr/local/bin
scp  kubelet kube-proxy k8s-master3:/usr/local/bin

2. Create the kubelet bootstrap kubeconfig files (run on k8s-master1)

Create one kubelet bootstrap kubeconfig file per node: the example below uses k8s-master3; repeat it for the other four nodes, substituting the node name each time.
The kubeconfig embeds a token rather than a certificate; the client certificate is issued later by kube-controller-manager.

Example for k8s-master3:
# create a bootstrap token
export BOOTSTRAP_TOKEN=$(kubeadm token create \
  --description kubelet-bootstrap-token \
  --groups system:bootstrappers:k8s-master3 \
  --kubeconfig ~/.kube/config)

# set the cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/cert/ca.pem \
  --embed-certs=true \
  --server=https://10.0.0.252:8443 \
  --kubeconfig=kubelet-bootstrap-k8s-master3.kubeconfig

# set the client authentication parameters
kubectl config set-credentials kubelet-bootstrap \
  --token=${BOOTSTRAP_TOKEN} \
  --kubeconfig=kubelet-bootstrap-k8s-master3.kubeconfig

# set the context parameters
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=kubelet-bootstrap-k8s-master3.kubeconfig

# set the default context
kubectl config use-context default --kubeconfig=kubelet-bootstrap-k8s-master3.kubeconfig

Repeat for k8s-master1, k8s-master2, node1 and node2, changing the node name each time; see the loop sketch below.
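Since the five kubeconfig files differ only in the node name, the whole step can be scripted. A sketch, assuming the node names below match your environment:

for node_name in k8s-master1 k8s-master2 k8s-master3 node1 node2; do
  # create a bootstrap token whose extra group encodes the node name
  export BOOTSTRAP_TOKEN=$(kubeadm token create \
    --description kubelet-bootstrap-token \
    --groups system:bootstrappers:${node_name} \
    --kubeconfig ~/.kube/config)
  kubectl config set-cluster kubernetes \
    --certificate-authority=/etc/kubernetes/cert/ca.pem \
    --embed-certs=true \
    --server=https://10.0.0.252:8443 \
    --kubeconfig=kubelet-bootstrap-${node_name}.kubeconfig
  kubectl config set-credentials kubelet-bootstrap \
    --token=${BOOTSTRAP_TOKEN} \
    --kubeconfig=kubelet-bootstrap-${node_name}.kubeconfig
  kubectl config set-context default \
    --cluster=kubernetes \
    --user=kubelet-bootstrap \
    --kubeconfig=kubelet-bootstrap-${node_name}.kubeconfig
  kubectl config use-context default --kubeconfig=kubelet-bootstrap-${node_name}.kubeconfig
done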

3. View the tokens kubeadm created for each node:

# kubeadm token list --kubeconfig ~/.kube/config
TOKEN                     TTL       EXPIRES                     USAGES                   DESCRIPTION               EXTRA GROUPS
96dynz.keh9dmbyzxhkwe94   1h        2019-07-16T14:40:16+08:00   authentication,signing   kubelet-bootstrap-token   system:bootstrappers:node2
b3nhgy.dut4ikk8fk867iha   23h       2019-07-17T13:39:53+08:00   authentication,signing   kubelet-bootstrap-token   system:bootstrappers:k8s-master3
lh52k2.aczplvgpjx9zkkcz   23h       2019-07-17T13:39:18+08:00   authentication,signing   kubelet-bootstrap-token   system:bootstrappers:k8s-master2
ryi1s4.h4b68dagevavcpte   30m       2019-07-16T14:10:13+08:00   authentication,signing   kubelet-bootstrap-token   system:bootstrappers:k8s-node1
wo5lzw.avz1jt67e2m0ry3o   23h       2019-07-17T13:38:00+08:00   authentication,signing   kubelet-bootstrap-token   system:bootstrappers:k8s-master1


The tokens are valid for one day; once expired they can no longer be used and are cleaned up by kube-controller-manager's tokencleaner (if that controller is enabled).
When kube-apiserver receives a kubelet's bootstrap token, it sets the request's user to system:bootstrap:<token-id> and its group to system:bootstrappers.
View the Secret associated with each token:
# kubectl get secrets  -n kube-system | grep token
bootstrap-token-asg5dv                           bootstrap.kubernetes.io/token         7      3m16s
bootstrap-token-g8llu5                           bootstrap.kubernetes.io/token         7      2m19s
bootstrap-token-ywzwvp                           bootstrap.kubernetes.io/token         7      4m43s
default-token-tx2h7                              kubernetes.io/service-account-token   3      17h

4. Distribute the bootstrap kubeconfig files

scp kubelet-bootstrap-node1.kubeconfig node1:/etc/kubernetes/cert/kubelet-bootstrap.kubeconfig
scp kubelet-bootstrap-node2.kubeconfig node2:/etc/kubernetes/cert/kubelet-bootstrap.kubeconfig
scp kubelet-bootstrap-k8s-master1.kubeconfig k8s-master1:/etc/kubernetes/cert/kubelet-bootstrap.kubeconfig
scp kubelet-bootstrap-k8s-master2.kubeconfig k8s-master2:/etc/kubernetes/cert/kubelet-bootstrap.kubeconfig
scp kubelet-bootstrap-k8s-master3.kubeconfig k8s-master3:/etc/kubernetes/cert/kubelet-bootstrap.kubeconfig
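Equivalently, the five files can be pushed and renamed in one loop (assuming the host names resolve via DNS or /etc/hosts):

for node in k8s-master1 k8s-master2 k8s-master3 node1 node2; do
  # each host receives its own file under the common name kubelet-bootstrap.kubeconfig
  scp kubelet-bootstrap-${node}.kubeconfig ${node}:/etc/kubernetes/cert/kubelet-bootstrap.kubeconfig
done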

5. Create and distribute the kubelet configuration file

Since v1.10, some kubelet parameters must be set via a configuration file; kubelet --help warns:
DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag
Create the kubelet configuration template:
# vim /etc/kubernetes/cert/kubelet.config.json 
{
  "kind": "KubeletConfiguration",
  "apiVersion": "kubelet.config.k8s.io/v1beta1",
  "authentication": {
    "x509": {
      "clientCAFile": "/etc/kubernetes/cert/ca.pem"
    },
    "webhook": {
      "enabled": true,
      "cacheTTL": "2m0s"
    },
    "anonymous": {
      "enabled": false
    }
  },
  "authorization": {
    "mode": "Webhook",
    "webhook": {
      "cacheAuthorizedTTL": "5m0s",
      "cacheUnauthorizedTTL": "30s"
    }
  },
  "address": "10.0.0.21",
  "port": 10250,
  "readOnlyPort": 0,
  "cgroupDriver": "cgroupfs",
  "hairpinMode": "promiscuous-bridge",
  "serializeImagePulls": false,
  "featureGates": {
    "RotateKubeletClientCertificate": true,
    "RotateKubeletServerCertificate": true
  },
  "clusterDomain": "zmjcd.cc.",
  "clusterDNS": ["10.254.0.2"]
}


address: the kubelet API listen address; it must not be 127.0.0.1, otherwise kube-apiserver, heapster and similar components cannot call the kubelet API;
readOnlyPort=0: disables the read-only port (default 10255), equivalent to leaving it unspecified;
authentication.anonymous.enabled: set to false, disallowing anonymous access to port 10250;
authentication.x509.clientCAFile: the CA certificate used to sign client certificates, enabling x509 client-certificate authentication;
authentication.webhook.enabled=true: enables HTTPS bearer-token authentication;
requests that pass neither x509 nor webhook authentication (whether from kube-apiserver or another client) are rejected as Unauthorized;
authorization.mode=Webhook: kubelet uses the SubjectAccessReview API to ask kube-apiserver whether a given user/group may operate on a resource (RBAC);
featureGates.RotateKubeletClientCertificate and featureGates.RotateKubeletServerCertificate: rotate certificates automatically; certificate lifetime is governed by kube-controller-manager's --experimental-cluster-signing-duration flag;
cgroupDriver must match the cgroup driver Docker actually uses (docker info | grep Cgroup); note that the --cgroup-driver flag in the systemd unit below overrides this value;
change "address": "10.0.0.21" to each node's own address;
kubelet must run as root;
Distribute the kubelet configuration file to each node (only the address differs; see the sed loop after the scp commands):
scp /etc/kubernetes/cert/kubelet.config.json node1:/etc/kubernetes/cert/kubelet.config.json
scp /etc/kubernetes/cert/kubelet.config.json node2:/etc/kubernetes/cert/kubelet.config.json
scp /etc/kubernetes/cert/kubelet.config.json k8s-master2:/etc/kubernetes/cert/kubelet.config.json
scp /etc/kubernetes/cert/kubelet.config.json k8s-master3:/etc/kubernetes/cert/kubelet.config.json
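Since only the address differs between nodes, the template can be patched and pushed in one pass. A sketch, assuming the node-to-IP mapping below matches your hosts:

for node in node1:10.0.0.31 node2:10.0.0.32 k8s-master2:10.0.0.22 k8s-master3:10.0.0.23; do
  host=${node%%:*}; ip=${node##*:}
  # substitute the listen address with the target node's IP before copying
  sed "s/10.0.0.21/${ip}/" /etc/kubernetes/cert/kubelet.config.json > kubelet.config.json.${host}
  scp kubelet.config.json.${host} ${host}:/etc/kubernetes/cert/kubelet.config.json
done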

6. Create and distribute the kubelet systemd unit file

# vim /etc/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service
[Service]
WorkingDirectory=/var/lib/kubelet
EnvironmentFile=-/etc/kubernetes/kubelet
ExecStart=/usr/local/bin/kubelet \
  --bootstrap-kubeconfig=/etc/kubernetes/cert/kubelet-bootstrap.kubeconfig \
  --cert-dir=/etc/kubernetes/cert \
  --kubeconfig=/etc/kubernetes/cert/kubelet.kubeconfig \
  --config=/etc/kubernetes/cert/kubelet.config.json \
  --hostname-override=10.0.0.21 \
  --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1 \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/log/kubernetes \
  --cgroup-driver=systemd \
  --fail-swap-on=false \
  --cluster-dns=10.254.0.2 \
  --cluster-domain=zmjcd.cc. \
  --v=2

Restart=on-failure
KillMode=process
[Install]
WantedBy=multi-user.target


If --hostname-override is set, kube-proxy must be given the same value, otherwise the Node will not be found;
--bootstrap-kubeconfig: points to the bootstrap kubeconfig file; kubelet uses the user name and token inside it to send a TLS bootstrapping request to kube-apiserver;
once Kubernetes approves the kubelet's CSR, the certificate and private key are written to the --cert-dir directory and referenced from the --kubeconfig file;
change --hostname-override=10.0.0.21 to each node's own address;
Distribute the kubelet systemd unit file to each node (see the sed loop after the scp commands):
scp /etc/systemd/system/kubelet.service node2:/etc/systemd/system/kubelet.service
scp /etc/systemd/system/kubelet.service node1:/etc/systemd/system/kubelet.service
scp /etc/systemd/system/kubelet.service k8s-master2:/etc/systemd/system/kubelet.service
scp /etc/systemd/system/kubelet.service k8s-master3:/etc/systemd/system/kubelet.service
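The same sed trick works for --hostname-override in the unit file (node-to-IP mapping assumed as above):

for node in node1:10.0.0.31 node2:10.0.0.32 k8s-master2:10.0.0.22 k8s-master3:10.0.0.23; do
  host=${node%%:*}; ip=${node##*:}
  sed "s/--hostname-override=10.0.0.21/--hostname-override=${ip}/" \
    /etc/systemd/system/kubelet.service > kubelet.service.${host}
  scp kubelet.service.${host} ${host}:/etc/systemd/system/kubelet.service
done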

7. Bootstrap Token Auth and granting permissions

At startup, kubelet checks whether the file given by --kubeconfig exists; if it does not, kubelet uses --bootstrap-kubeconfig to send a certificate signing request (CSR) to kube-apiserver.
kube-apiserver authenticates the token in the request (the token created earlier with kubeadm); on success it sets the request's user to system:bootstrap:<token-id> and its group to system:bootstrappers. This process is called Bootstrap Token Auth.
By default this user and group are not allowed to create CSRs, so kubelet fails to start with errors like:
# sudo journalctl -u kubelet -a |grep -A 2 'certificatesigningrequests'
Jun 28 17:50:25 node1 kubelet[5862]: F0628 17:50:25.944023    5862 server.go:273] failed to run Kubelet: cannot create certificate signing request: certificatesigningrequests.certificates.k8s.io is forbidden: User "system:bootstrap:5sh41r" cannot create resource "certificatesigningrequests" in API group "certificates.k8s.io" at the cluster scope
Jun 28 17:50:25 node1 kubelet[5862]: goroutine 1 [running]:
Jun 28 17:50:25 node1 kubelet[5862]: k8s.io/kubernetes/vendor/k8s.io/klog.stacks(0xc000666200, 0xc00085ee00, 0x137, 0x36f)

The fix is to create a clusterrolebinding that binds the group system:bootstrappers to the clusterrole system:node-bootstrapper:
# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --group=system:bootstrappers
clusterrolebinding.rbac.authorization.k8s.io/kubelet-bootstrap created
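To confirm the binding took effect before restarting kubelet, a quick check:

# kubectl describe clusterrolebinding kubelet-bootstrap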

8. Start the kubelet service

mkdir -pv /var/log/kubernetes && mkdir -pv /var/lib/kubelet
systemctl daemon-reload 
systemctl enable kubelet 
systemctl restart kubelet

Disable the swap partition first, or kubelet will fail to start (see the snippet after this list);
the working and log directories must be created beforehand;
after starting, kubelet uses --bootstrap-kubeconfig to send a CSR to kube-apiserver; once the CSR is approved, kube-controller-manager creates the kubelet's TLS client certificate and private key, which are referenced from the --kubeconfig file.
Note: kube-controller-manager must be started with --cluster-signing-cert-file and --cluster-signing-key-file, or it will not issue certificates and keys for TLS bootstrapping.
At this point the CSRs of all five nodes are in Pending state.
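A minimal sketch of disabling swap permanently (the sed pattern assumes a standard /etc/fstab swap entry; adjust to your layout):

swapoff -a                           # turn swap off immediately
sed -i '/ swap / s/^/#/' /etc/fstab  # comment out the swap entry so it stays off after reboot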

9. Approve kubelet CSR requests

A CSR can be approved manually or automatically. The automatic approach is recommended: since v1.8, the certificates generated from approved CSRs can also be rotated automatically.
i. Manually approve a CSR
List the CSRs:
[root@k8s-master1 ~]# kubectl get csr
NAME                                                   AGE   REQUESTOR                 CONDITION
node-csr-l002-EYLXbhz-YxbRBMtsHsUWq_ZV1CjD8ihoIwdIzA   27s   system:bootstrap:5sh41r   Pending

Approve the CSR:

[root@k8s-master1 ~]# kubectl certificate approve node-csr-l002-EYLXbhz-YxbRBMtsHsUWq_ZV1CjD8ihoIwdIzA
certificatesigningrequest.certificates.k8s.io "node-csr-l002-EYLXbhz-YxbRBMtsHsUWq_ZV1CjD8ihoIwdIzA" approved

# kubectl describe csr
Name:               node-csr-l002-EYLXbhz-YxbRBMtsHsUWq_ZV1CjD8ihoIwdIzA
Labels:             <none>
Annotations:        <none>
CreationTimestamp:  Fri, 28 Jun 2019 17:52:21 +0800
Requesting User:    system:bootstrap:5sh41r
Status:             Approved,Issued
Subject:
         Common Name:    system:node:10.0.0.31
         Serial Number:  
         Organization:   system:nodes
Events:  <none>
Requesting User: the user that submitted the CSR; kube-apiserver authenticates and authorizes it;
Subject: the certificate information being requested;
the certificate's CN is system:node:10.0.0.31 and its Organization is system:nodes; kube-apiserver's Node authorization mode grants such certificates the corresponding permissions. Approving every node by hand does not scale; see the one-liner after these notes.
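During the initial bootstrap, all Pending requests can be approved in one pass rather than by name. A sketch; note that it blindly approves every CSR currently in the list, so use it only while bootstrapping trusted nodes:

# kubectl get csr -o name | xargs kubectl certificate approve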

ii. Automatically approve CSRs
Create three ClusterRoleBindings, used respectively to auto-approve client certificates and to renew client and server certificates:

# cat > csr-crb.yaml <<EOF
# Approve all CSRs for the group "system:bootstrappers"
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: auto-approve-csrs-for-group
subjects:
- kind: Group
  name: system:bootstrappers
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
  apiGroup: rbac.authorization.k8s.io
---
# To let a node of the group "system:nodes" renew its own credentials
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: node-client-cert-renewal
subjects:
- kind: Group
  name: system:nodes
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
  apiGroup: rbac.authorization.k8s.io
---
# A ClusterRole which instructs the CSR approver to approve a node requesting a
# serving cert matching its client cert.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: approve-node-server-renewal-csr
rules:
- apiGroups: ["certificates.k8s.io"]
  resources: ["certificatesigningrequests/selfnodeserver"]
  verbs: ["create"]
---
# To let a node of the group "system:nodes" renew its own server credentials
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: node-server-cert-renewal
subjects:
- kind: Group
  name: system:nodes
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: approve-node-server-renewal-csr
  apiGroup: rbac.authorization.k8s.io
EOF

auto-approve-csrs-for-group: auto-approves a node's first CSR; note that on the first CSR the requesting group is system:bootstrappers;
node-client-cert-renewal: auto-approves renewals of a node's expiring client certificates; the generated certificates carry group system:nodes;
node-server-cert-renewal: auto-approves renewals of a node's expiring server certificates; the generated certificates carry group system:nodes;

# kubectl apply -f csr-crb.yaml
clusterrolebinding.rbac.authorization.k8s.io/auto-approve-csrs-for-group created
clusterrolebinding.rbac.authorization.k8s.io/node-client-cert-renewal created
clusterrole.rbac.authorization.k8s.io/approve-node-server-renewal-csr created
clusterrolebinding.rbac.authorization.k8s.io/node-server-cert-renewal created

10. Check kubelet status

After a short wait (1-10 minutes), the CSRs of the remaining three nodes are auto-approved:
# kubectl get csr
NAME                                                   AGE   REQUESTOR                 CONDITION
node-csr-AIVLKk9Ugnp9gpQpVuB97PubO1dH_BzVqUDZk78beMI   13m   system:bootstrap:wo5lzw   Approved,Issued
node-csr-RqPz0gdEOvfxG-8T-sOGHa5nRWmMpcaEwwd6lYRP8RA   12m   system:bootstrap:lh52k2   Approved,Issued
node-csr-k2UdNwFvEc3gnAndbdmqCphUmbgc0P4uePQ6gCy0tUw   12m   system:bootstrap:b3nhgy   Approved,Issued

All nodes are Ready:
# kubectl get nodes
NAME           STATUS   ROLES    AGE     VERSION
10.0.0.21   Ready    <none>   9m21s   v1.15.0
10.0.0.22   Ready    <none>   8m55s   v1.15.0
10.0.0.23   Ready    <none>   8m33s   v1.15.0
10.0.0.31   Ready    <none>   23h     v1.15.0
10.0.0.32   Ready    <none>   22h     v1.15.0

kube-controller-manager has generated a kubeconfig file and key pair for each node (see the openssl check after the listing):
# ll /etc/kubernetes/cert/ | grep kubelet
-rw------- 1 root root 2182 Jul 16 13:43 kubelet-bootstrap.kubeconfig
-rw------- 1 root root 1281 Jul 16 13:53 kubelet-client-2019-07-16-13-53-24.pem
lrwxrwxrwx 1 root root   59 Jul 16 13:53 kubelet-client-current.pem -> /etc/kubernetes/cert/kubelet-client-2019-07-16-13-53-24.pem
-rw-r--r-- 1 root root  797 Jul 16 13:44 kubelet.config.json
-rw-r--r-- 1 root root 2181 Jul 16 13:53 kubelet.crt
-rw------- 1 root root 1679 Jul 16 13:53 kubelet.key
-rw------- 1 root root 2322 Jul 16 13:53 kubelet.kubeconfig
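The issued client certificate can be inspected to confirm its subject and validity period match expectations (a sketch; the certificate block is read from the combined PEM file):

# openssl x509 -in /etc/kubernetes/cert/kubelet-client-current.pem -noout -subject -dates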

11. APIs exposed by kubelet

Once started, kubelet listens on several ports for requests from kube-apiserver and other components:

# netstat -lnpt|grep kubelet
tcp        0      0 127.0.0.1:10248         0.0.0.0:*               LISTEN      6691/kubelet        
tcp        0      0 127.0.0.1:44808         0.0.0.0:*               LISTEN      6691/kubelet        
tcp        0      0 10.0.0.32:10250      0.0.0.0:*               LISTEN      6691/kubelet 
4194: cAdvisor HTTP service;
10248: healthz HTTP service;
10250: HTTPS API service; note that the read-only port 10255 is not opened;
For example, when you run kubectl exec -it nginx-ds-5rmws -- sh, kube-apiserver sends kubelet a request like:

POST /exec/default/nginx-ds-5rmws/my-nginx?command=sh&input=1&output=1&tty=1
kubelet serves the following HTTPS endpoints on port 10250:

/pods、/runningpods
/metrics、/metrics/cadvisor、/metrics/probes
/spec
/stats、/stats/container
/logs
/run/, /exec/, /attach/, /portForward/, /containerLogs/ and other management endpoints;
For details see: https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/server/server.go#L434:3
Because anonymous authentication is disabled and webhook authorization is enabled, every request to the HTTPS API on port 10250 must be authenticated and authorized.
The predefined ClusterRole system:kubelet-api-admin grants access to all kubelet APIs:

# kubectl describe clusterrole system:kubelet-api-admin
Name:         system:kubelet-api-admin
Labels:       kubernetes.io/bootstrapping=rbac-defaults
Annotations:  rbac.authorization.kubernetes.io/autoupdate: true
PolicyRule:
  Resources      Non-Resource URLs  Resource Names  Verbs
  ---------      -----------------  --------------  -----
  nodes/log      []                 []              [*]
  nodes/metrics  []                 []              [*]
  nodes/proxy    []                 []              [*]
  nodes/spec     []                 []              [*]
  nodes/stats    []                 []              [*]
  nodes          []                 []              [get list watch proxy]

12. Kubelet API authentication and authorization

The kubelet configuration file kubelet.config.json sets the following authentication parameters:

authentication.anonymous.enabled: set to false, disallowing anonymous access to port 10250;
authentication.x509.clientCAFile: the CA certificate used to sign client certificates, enabling HTTPS certificate authentication;
authentication.webhook.enabled=true: enables HTTPS bearer-token authentication;
and the following authorization parameter:

authorization.mode=Webhook: enables RBAC authorization;
On each request, kubelet either verifies the client certificate against clientCAFile or checks that the bearer token is valid. If both fail, the request is rejected as Unauthorized:

# curl -s --cacert /etc/kubernetes/cert/ca.pem https://10.0.0.21:10250/metrics -k
Unauthorized

# curl -s --cacert /etc/kubernetes/cert/ca.pem -H "Authorization: Bearer 123456" https://10.0.0.21:10250/metrics -k
Unauthorized

Once authentication succeeds, kubelet sends a SubjectAccessReview request to kube-apiserver to check whether the user/group behind the certificate or token has permission to operate on the resource (RBAC).

13. Certificate authentication and authorization

# a certificate with insufficient permissions;
# curl -s --cacert /etc/kubernetes/cert/ca.pem --cert /etc/kubernetes/cert/kube-controller-manager.pem --key /etc/kubernetes/cert/kube-controller-manager-key.pem https://10.0.0.21:10250/metrics --insecure
Forbidden (user=system:kube-controller-manager, verb=get, resource=nodes, subresource=metrics)

# the admin certificate with full permissions, created when deploying the kubectl command-line tool;
# curl -s -k --cacert /etc/kubernetes/cert/ca.pem --cert /etc/kubernetes/cert/admin.pem --key /etc/kubernetes/cert/admin-key.pem https://10.0.0.21:10250/metrics|head
# HELP apiserver_audit_event_total Counter of audit events generated and sent to the audit backend.
# TYPE apiserver_audit_event_total counter
apiserver_audit_event_total 0
# HELP apiserver_audit_requests_rejected_total Counter of apiserver requests rejected due to an error in audit logging backend.
# TYPE apiserver_audit_requests_rejected_total counter
apiserver_audit_requests_rejected_total 0
# HELP apiserver_client_certificate_expiration_seconds Distribution of the remaining lifetime on the certificate used to authenticate a request.
# TYPE apiserver_client_certificate_expiration_seconds histogram
apiserver_client_certificate_expiration_seconds_bucket{le="0"} 0
apiserver_client_certificate_expiration_seconds_bucket{le="1800"} 0

The values of --cacert, --cert and --key must be file paths; a relative path such as ./admin.pem must keep its leading ./, otherwise curl returns 401 Unauthorized.


Bearer token authentication and authorization:
Create a ServiceAccount and bind it to the ClusterRole system:kubelet-api-admin so that it is allowed to call the kubelet API:


kubectl create sa kubelet-api-test
kubectl create clusterrolebinding kubelet-api-test --clusterrole=system:kubelet-api-admin --serviceaccount=default:kubelet-api-test
SECRET=$(kubectl get secrets | grep kubelet-api-test | awk '{print $1}')
TOKEN=$(kubectl describe secret ${SECRET} | grep -E '^token' | awk '{print $2}')
echo ${TOKEN}

# curl -s --cacert /etc/kubernetes/cert/ca.pem -H "Authorization: Bearer ${TOKEN}" https://10.0.0.21:10250/metrics -k |head 
# HELP apiserver_audit_event_total Counter of audit events generated and sent to the audit backend.
# TYPE apiserver_audit_event_total counter
apiserver_audit_event_total 0
# HELP apiserver_audit_requests_rejected_total Counter of apiserver requests rejected due to an error in audit logging backend.
# TYPE apiserver_audit_requests_rejected_total counter
apiserver_audit_requests_rejected_total 0
# HELP apiserver_client_certificate_expiration_seconds Distribution of the remaining lifetime on the certificate used to authenticate a request.
# TYPE apiserver_client_certificate_expiration_seconds histogram
apiserver_client_certificate_expiration_seconds_bucket{le="0"} 0
apiserver_client_certificate_expiration_seconds_bucket{le="1800"} 0

Note:
kubelet.config.json sets authentication.anonymous.enabled to false, so the HTTPS service on port 10250 cannot be accessed anonymously;
refer to "A.浏览器访问kube-apiserver安全端口.md" to create and import the relevant certificates, then access port 10250 as above.

III. Deploy the kube-proxy component

kube-proxy runs on all worker nodes. It watches the apiserver for changes to Services and Endpoints and creates routing rules to load-balance service traffic.
This section covers deploying kube-proxy in ipvs mode.

1. Create the kube-proxy certificate

# cat > kube-proxy-csr.json <<EOF
{
  "CN": "system:kube-proxy",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
   "names": [
    {
      "O": "k8s",
      "OU": "k8s Security",
      "L": "ChengDU",
      "ST": "SiChuan",
      "C": "CN"
    }
  ]
}
EOF

CN: sets the certificate's User to system:kube-proxy;
the predefined RoleBinding system:node-proxier binds User system:kube-proxy to Role system:node-proxier, which grants permission to call kube-apiserver's proxy-related APIs;
kube-proxy uses this certificate only as a client certificate, so the hosts field is empty;
Generate the certificate and private key:
cfssl gencert -ca=/etc/k8s/ssl/ca.pem \
  -ca-key=/etc/k8s/ssl/ca-key.pem \
  -config=/etc/k8s/ssl/ca-config.json \
  -profile=kubernetes  kube-proxy-csr.json | cfssljson -bare kube-proxy
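Before building the kubeconfig, it is worth checking that the CN and O fields came out as intended (a sketch):

# openssl x509 -in kube-proxy.pem -noout -subject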

2. Create and distribute the kubeconfig file

kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/cert/ca.pem \
  --embed-certs=true \
  --server=https://10.0.0.252:8443 \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-credentials kube-proxy \
  --client-certificate=/etc/kubernetes/cert/kube-proxy.pem \
  --client-key=/etc/kubernetes/cert/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
The certificate and private key created in the previous step, together with the kube-apiserver address, are embedded into the kubeconfig file.

cp -av kube-proxy.kubeconfig /etc/kubernetes/cert/
scp /etc/kubernetes/cert/kube-proxy.kubeconfig node1:/etc/kubernetes/cert/
scp /etc/kubernetes/cert/kube-proxy.kubeconfig node2:/etc/kubernetes/cert/
scp /etc/kubernetes/cert/kube-proxy.kubeconfig k8s-master2:/etc/kubernetes/cert/
scp /etc/kubernetes/cert/kube-proxy.kubeconfig k8s-master3:/etc/kubernetes/cert/

3. Create the kube-proxy configuration file

Since v1.10, some kube-proxy parameters can be set in a configuration file. The file can be generated with the --write-config-to option, or written by hand following the kubeproxyconfig type definitions:
https://github.com/kubernetes/kubernetes/blob/master/pkg/proxy/apis/kubeproxyconfig/types.go
Create the kube-proxy config template:
# cat >/etc/kubernetes/cert/kube-proxy.config.yaml <<EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
bindAddress: 10.0.0.21
clientConnection:
  kubeconfig: /etc/kubernetes/cert/kube-proxy.kubeconfig
clusterCIDR: 172.30.0.0/16
healthzBindAddress: 10.0.0.21:10256
hostnameOverride: 10.0.0.21
metricsBindAddress: 10.0.0.21:10249
mode: "ipvs"
EOF

bindAddress: the listen address;
clientConnection.kubeconfig: the kubeconfig file used to connect to the apiserver;
clusterCIDR: kube-proxy uses this to distinguish cluster-internal from external traffic; SNAT of requests to Service IPs is performed only when clusterCIDR or --masquerade-all is set;
hostnameOverride: must match the kubelet's value, otherwise kube-proxy will not find its Node after starting and will not create any ipvs rules;
mode: use ipvs mode;
Change the addresses above to each host's own (see the sed loop below); clusterCIDR is the flannel network CIDR.

Distribute the kube-proxy configuration file to each node:
scp /etc/kubernetes/cert/kube-proxy.config.yaml node1:/etc/kubernetes/cert/
scp /etc/kubernetes/cert/kube-proxy.config.yaml node2:/etc/kubernetes/cert/
scp /etc/kubernetes/cert/kube-proxy.config.yaml k8s-master2:/etc/kubernetes/cert/
scp /etc/kubernetes/cert/kube-proxy.config.yaml k8s-master3:/etc/kubernetes/cert/
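As with the kubelet config, the per-node addresses can be patched in a loop before copying (node-to-IP mapping assumed as before):

for node in node1:10.0.0.31 node2:10.0.0.32 k8s-master2:10.0.0.22 k8s-master3:10.0.0.23; do
  host=${node%%:*}; ip=${node##*:}
  # the g flag rewrites bindAddress, healthzBindAddress, hostnameOverride and metricsBindAddress
  sed "s/10.0.0.21/${ip}/g" /etc/kubernetes/cert/kube-proxy.config.yaml > kube-proxy.config.yaml.${host}
  scp kube-proxy.config.yaml.${host} ${host}:/etc/kubernetes/cert/kube-proxy.config.yaml
done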

4. Create and distribute the kube-proxy systemd unit file

vim  /etc/systemd/system/kube-proxy.service 
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/usr/local/bin/kube-proxy \
  --config=/etc/kubernetes/cert/kube-proxy.config.yaml \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/lib/kube-proxy/log \
  --v=2
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

scp /etc/systemd/system/kube-proxy.service node1:/etc/systemd/system/
scp /etc/systemd/system/kube-proxy.service node2:/etc/systemd/system/
scp /etc/systemd/system/kube-proxy.service k8s-master2:/etc/systemd/system/
scp /etc/systemd/system/kube-proxy.service k8s-master3:/etc/systemd/system/

5. Start the kube-proxy service

mkdir -p /var/lib/kube-proxy/log
systemctl daemon-reload
systemctl enable kube-proxy
systemctl restart kube-proxy

6. Check the result

# systemctl status kube-proxy
● kube-proxy.service - Kubernetes Kube-Proxy Server
   Loaded: loaded (/etc/systemd/system/kube-proxy.service; enabled; vendor preset: disabled)
   Active: active (running) since Tue 2019-07-16 11:03:04 CST; 1s ago
     Docs: https://github.com/GoogleCloudPlatform/kubernetes
 Main PID: 8605 (kube-proxy)
    Tasks: 0
   Memory: 10.7M
   CGroup: /system.slice/kube-proxy.service
           ‣ 8605 /usr/local/bin/kube-proxy --config=/etc/kubernetes/cert/kube-proxy.config.yaml --alsologtostderr=true --logtostderr=false --log-dir=/var/lib/kube-proxy/log --v=2

Jul 16 11:03:04 node1.shuyaun.com kube-proxy[8605]: I0716 11:03:04.953403    8605 server.go:534] Version: v1.15.0
Jul 16 11:03:04 node1.shuyaun.com kube-proxy[8605]: I0716 11:03:04.956020    8605 server.go:550] Running in resource-only container "/kube-proxy"
Jul 16 11:03:04 node1.shuyaun.com kube-proxy[8605]: I0716 11:03:04.956364    8605 conntrack.go:52] Setting nf_conntrack_max to 131072
Jul 16 11:03:04 node1.shuyaun.com kube-proxy[8605]: I0716 11:03:04.957057    8605 config.go:187] Starting service config controller
Jul 16 11:03:04 node1.shuyaun.com kube-proxy[8605]: I0716 11:03:04.957086    8605 controller_utils.go:1029] Waiting for caches to sync for service config controller
Jul 16 11:03:04 node1.shuyaun.com kube-proxy[8605]: I0716 11:03:04.957104    8605 config.go:96] Starting endpoints config controller
Jul 16 11:03:04 node1.shuyaun.com kube-proxy[8605]: I0716 11:03:04.957116    8605 controller_utils.go:1029] Waiting for caches to sync for endpoints config controller
Jul 16 11:03:05 node1.shuyaun.com kube-proxy[8605]: I0716 11:03:05.057196    8605 controller_utils.go:1036] Caches are synced for service config controller
Jul 16 11:03:05 node1.shuyaun.com kube-proxy[8605]: I0716 11:03:05.057201    8605 controller_utils.go:1036] Caches are synced for endpoints config controller
Jul 16 11:03:05 node1.shuyaun.com kube-proxy[8605]: I0716 11:03:05.057246    8605 service.go:332] Adding new service port "default/kubernetes:https" at 10.254.0.1:443/TCP



# netstat -lnpt|grep kube-proxy
tcp        0      0 10.0.0.31:10249      0.0.0.0:*               LISTEN      8605/kube-proxy     
tcp6       0      0 :::10256                :::*                    LISTEN      8605/kube-proxy     

7. View the ipvs routing rules

# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.254.0.1:443 rr
  -> 10.0.0.21:6443            Masq    1      0          0         
  -> 10.0.0.22:6443            Masq    1      0          0         
  -> 10.0.0.23:6443            Masq    1      0          0         

As shown, all requests to port 443 of the kubernetes cluster IP are forwarded to port 6443 of the kube-apiservers. If no rules appear, run the checks below.
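Two quick checks when the rules are missing (a sketch): confirm the ip_vs kernel modules are loaded, and remember that hostnameOverride must match the kubelet's value:

# lsmod | grep -e ip_vs -e nf_conntrack    # the ipvs kernel modules should be listed
# ipvsadm -ln --stats                      # per-backend traffic counters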
Congratulations! Node deployment is now complete.

IV. Verify cluster functionality

1. Check node status

# kubectl get nodes
NAME           STATUS   ROLES    AGE   VERSION
10.0.0.21   Ready    <none>   38m   v1.15.0
10.0.0.22   Ready    <none>   37m   v1.15.0
10.0.0.23   Ready    <none>   37m   v1.15.0
10.0.0.31   Ready    <none>   23h   v1.15.0
10.0.0.32   Ready    <none>   23h   v1.15.0

All nodes reporting Ready is the expected state.

2. Create an nginx web test manifest

# cat nginx-web.yml 
apiVersion: v1
kind: Service
metadata:
  name: nginx-web
  labels:
    tier: frontend
spec:
  type: NodePort
  selector:
    tier: frontend
  ports:
  - name: http
    port: 80
    targetPort: 80
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-con
  labels:
    tier: frontend
spec:
  replicas: 3
  template:
    metadata:
      labels:
        tier: frontend
    spec:
      containers:
      - name: nginx-pod
        image: nginx
        ports:
        - containerPort: 80
Apply the nginx-web.yml file:
# kubectl create -f nginx-web.yml
service/nginx-web created
deployment.extensions/nginx-con created

3. Verify Pod IP connectivity from each Node

# kubectl get pod -o wide
NAME                         READY   STATUS    RESTARTS   AGE   IP            NODE           NOMINATED NODE   READINESS GATES
nginx-con-794f7d55b7-6pml5   1/1     Running   0          33s   172.30.56.2   10.0.0.22   <none>           <none>
nginx-con-794f7d55b7-77jtm   1/1     Running   0          33s   172.30.41.2   10.0.0.32   <none>           <none>
nginx-con-794f7d55b7-8ljn9   1/1     Running   0          33s   172.30.92.2   10.0.0.21   <none>           <none>

The nginx Pod IPs are 172.30.56.2, 172.30.41.2 and 172.30.92.2; from every Node, ping all three to check connectivity:
# ping 172.30.41.2
PING 172.30.41.2 (172.30.41.2) 56(84) bytes of data.
64 bytes from 172.30.41.2: icmp_seq=1 ttl=63 time=0.301 ms
64 bytes from 172.30.41.2: icmp_seq=2 ttl=63 time=0.232 ms

# ping 172.30.92.2
PING 172.30.92.2 (172.30.92.2) 56(84) bytes of data.
64 bytes from 172.30.92.2: icmp_seq=1 ttl=64 time=0.069 ms

# ping 172.30.56.2
PING 172.30.56.2 (172.30.56.2) 56(84) bytes of data.
64 bytes from 172.30.56.2: icmp_seq=1 ttl=63 time=0.316 ms

4. Check the service cluster IP

# kubectl get svc
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.254.0.1      <none>        443/TCP        6d21h
nginx-web    NodePort    10.254.92.207   <none>        80:31258/TCP   3m20s

10.254.92.207 is the cluster IP of the nginx service, fronting the three pod replicas above.
Port 80 is the cluster IP's port; 31258 is the port opened on every node, so the service can also be reached as nodeIP:nodePort (see the loop below).
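Since the NodePort is open on every node, a quick loop confirms reachability cluster-wide (node IPs taken from the node list above):

for ip in 10.0.0.21 10.0.0.22 10.0.0.23 10.0.0.31 10.0.0.32; do
  # expect "HTTP/1.1 200 OK" from every node
  curl -sI http://${ip}:31258 | head -1
done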

5. Verify service reachability

# curl -I  10.0.0.21:31258
HTTP/1.1 200 OK
Server: nginx/1.17.1
Date: Tue, 16 Jul 2019 03:17:01 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 25 Jun 2019 12:19:45 GMT
Connection: keep-alive
ETag: "5d121161-264"
Accept-Ranges: bytes

# curl -I 10.254.92.207
HTTP/1.1 200 OK
Server: nginx/1.17.1
Date: Tue, 16 Jul 2019 06:39:40 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 25 Jun 2019 12:19:45 GMT
Connection: keep-alive
ETag: "5d121161-264"
Accept-Ranges: bytes