
Machine layout:

192.168.xxx.112 (CentOS 7) - master, services: etcd flannel kube-apiserver kube-controller-manager kube-scheduler

192.168.xxx.113 (CentOS 7) - node1, services: kubelet kube-proxy flannel docker etcd

192.168.xxx.114 (CentOS 7) - node2, services: kubelet kube-proxy flannel docker etcd

Disable the firewall:

systemctl stop firewalld.service

systemctl disable firewalld.service

Set the timezone: ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime

Disable SELinux:

vim /etc/selinux/config (or non-interactively: sed -i 's/enforcing/disabled/g' /etc/selinux/config)

SELINUX=disabled

Security and resource limit settings:

setenforce 0

echo "ulimit -HSn 65536" >> /etc/rc.local

echo "ulimit -HSn 65536" >> /root/.bash_profile

ulimit -HSn 65536

vim /etc/security/limits.conf

* soft nofile 65536

* hard nofile 65536

* soft nproc 2048

* hard nproc 4096

* - memlock unlimited

Automatic cache cleanup: echo 3 > /proc/sys/vm/drop_caches
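If the caches may contain dirty pages, it is safer to flush them first (a common precaution, not part of the original notes): sync && echo 3 > /proc/sys/vm/drop_caches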

------------------------------------------ Master machine --------------------------------

Install on the master machine:

yum install etcd -y

yum install flannel -y

---

Configure etcd (vim /etc/etcd/etcd.conf):

ETCD_DATA_DIR="/var/lib/etcd/default.etcd"

ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"

ETCD_NAME="default"

ETCD_ADVERTISE_CLIENT_URLS="http://192.168.xxx.112:2379"

Start etcd: systemctl enable etcd && systemctl restart etcd && systemctl status etcd

Create the network range for cross-host container traffic: etcdctl mk /coreos.com/network/config '{"Network": "10.1.0.0/16"}'

Verify the key was created: etcdctl get /coreos.com/network/config
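An optional sanity check, assuming the etcd v2 etcdctl shipped with CentOS 7:

etcdctl --endpoints=http://192.168.xxx.112:2379 cluster-health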

---

Configure flanneld (vim /etc/sysconfig/flanneld):

FLANNEL_ETCD_ENDPOINTS="http://192.168.xxx.112:2379"

FLANNEL_ETCD="http://192.168.xxx.112:2379"

FLANNEL_ETCD_PREFIX="/coreos.com/network"

Start: systemctl enable flanneld && systemctl restart flanneld    Check status: service flanneld status
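If flanneld came up correctly it writes the subnet it leased from etcd into an env file; a quick check (default path used by the yum-packaged flannel):

cat /run/flannel/subnet.env

ip -4 addr show flannel0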

---

tar zxvf kubernetes-server-linux-amd64.tar.gz

mv kubernetes /data/kubernetes2

mkdir -p /data/kubernetes/{bin,cfg,logs}

mv /data/kubernetes2/server/bin/{kube-apiserver,kube-scheduler,kube-controller-manager,kubectl} /data/kubernetes/bin

Configure the apiserver (vim /data/kubernetes/cfg/apiserver):

KUBE_LOGTOSTDERR="--logtostderr=false"

KUBE_LOG_DIR="--log-dir=/data/kubernetes/logs"

KUBE_LOG_LEVEL="--v=4"

KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"

KUBE_API_PORT="--insecure-port=8080"

KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.xxx.112:2379"

KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.1.0.0/16"

KUBE_ALLOW_PRIV="--allow-privileged=true"

KUBE_ADVERTISE_ADDR="--advertise-address=192.168.xxx.112"

KUBE_API_ACCOUNT="--admission_control=ServiceAccount"

KUBE_API_ARGS_CA="--client-ca-file=/data/kubernetes/cret/ca.crt"

KUBE_API_ARGS_SERVERS="--tls-cert-file=/data/kubernetes/cret/server.crt"

KUBE_API_ARGSSERVERK="--tls-private-key-file=/data/kubernetes/cret/server.key"

vi /lib/systemd/system/kube-apiserver.service

[Unit]

Description=Kubernetes API Server

Documentation=https://github.com/kubernetes/kubernetes

[Service]

EnvironmentFile=-/data/kubernetes/cfg/apiserver

ExecStart=/data/kubernetes/bin/kube-apiserver ${KUBE_API_ACCOUNT} \
        ${KUBE_API_ARGS_CA} \
        ${KUBE_API_ARGS_SERVERS} \
        ${KUBE_API_ARGSSERVERK} \
        ${KUBE_LOGTOSTDERR} \
        ${KUBE_LOG_DIR} \
        ${KUBE_LOG_LEVEL} \
        ${KUBE_ETCD_SERVERS} \
        ${KUBE_API_ADDRESS} \
        ${KUBE_API_PORT} \
        ${KUBE_ADVERTISE_ADDR} \
        ${KUBE_ALLOW_PRIV} \
        ${KUBE_SERVICE_ADDRESSES}

Restart=on-failure

[Install]

WantedBy=multi-user.target

systemctl daemon-reload

systemctl enable kube-apiserver

systemctl restart kube-apiserver

systemctl status kube-apiserver
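A quick way to confirm the apiserver is really serving on the insecure port configured above (--insecure-port=8080):

curl http://192.168.xxx.112:8080/healthz    # should print: ok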

---

vi /data/kubernetes/cfg/kube-scheduler

KUBE_LOGTOSTDERR="--logtostderr=false"

KUBE_LOG_DIR="--log-dir=/data/kubernetes/logs"

KUBE_LOG_LEVEL="--v=4"

KUBE_MASTER="--master=192.168.xxx.112:8080"

KUBE_LEADER_ELECT="--leader-elect"

vi /lib/systemd/system/kube-scheduler.service

[Unit]

Description=Kubernetes Scheduler

Documentation=https://github.com/kubernetes/kubernetes

[Service]

EnvironmentFile=-/data/kubernetes/cfg/kube-scheduler

ExecStart=/data/kubernetes/bin/kube-scheduler \
        ${KUBE_LOGTOSTDERR} \
        ${KUBE_LOG_DIR} \
        ${KUBE_LOG_LEVEL} \
        ${KUBE_MASTER} \
        ${KUBE_LEADER_ELECT}

Restart=on-failure

[Install]

WantedBy=multi-user.target

systemctl daemon-reload

systemctl enable kube-scheduler

systemctl restart kube-scheduler

systemctl status kube-scheduler

---


vim /data/kubernetes/cfg/kube-controller-manager

KUBE_LOGTOSTDERR="--logtostderr=false"

KUBE_LOG_DIR="--log-dir=/data/kubernetes/logs"

KUBE_LOG_LEVEL="--v=4"

KUBE_MASTER="--master=192.168.xxx.112:8080"

KUBE_ARGS_SERVERK="--service_account_private_key_file=/data/kubernetes/cret/server.key"

KUBE_ARGS_CA="--root-ca-file=/data/kubernetes/cret/ca.crt"

vi /lib/systemd/system/kube-controller-manager.service

[Unit]

Description=Kubernetes Controller Manager

Documentation=https://github.com/kubernetes/kubernetes

[Service]

EnvironmentFile=-/data/kubernetes/cfg/kube-controller-manager

ExecStart=/data/kubernetes/bin/kube-controller-manager ${KUBE_ARGS_SERVERK} \
        ${KUBE_ARGS_CA} \
        ${KUBE_LOGTOSTDERR} \
        ${KUBE_LOG_DIR} \
        ${KUBE_LOG_LEVEL} \
        ${KUBE_MASTER} \
        ${KUBE_LEADER_ELECT}

Restart=on-failure

[Install]

WantedBy=multi-user.target

systemctl daemon-reload

systemctl enable kube-controller-manager

systemctl restart kube-controller-manager

systemctl status kube-controller-manager
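With all three master components running, their health can be checked in one shot from the master (kubectl was copied to /data/kubernetes/bin earlier):

/data/kubernetes/bin/kubectl -s http://192.168.xxx.112:8080 get componentstatuses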

---

--------------------------------------------- Node machines -------------------------------------------------

yum install etcd -y

yum install flannel -y

yum install docker

---

Configure etcd (vim /etc/etcd/etcd.conf):

ETCD_DATA_DIR="/data/etcd/default.etcd"

ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"

ETCD_NAME="default"

ETCD_ADVERTISE_CLIENT_URLS="http://192.168.xxx.112:2379"

Start etcd: systemctl enable etcd && systemctl restart etcd && systemctl status etcd

Create the network range for cross-host container traffic: etcdctl mk /coreos.com/network/config '{"Network": "172.17.0.0/16"}'

Verify the key was created: etcdctl get /coreos.com/network/config

---

Configure flanneld (vim /etc/sysconfig/flanneld):

FLANNEL_ETCD_ENDPOINTS="http://192.168.xxx.112:2379"

FLANNEL_ETCD="http://192.168.xxx.112:2379"

FLANNEL_ETCD_PREFIX="/coreos.com/network"

Start: systemctl enable flanneld && systemctl restart flanneld    Check status: service flanneld status
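Docker should be restarted after flanneld so that docker0 picks up the flannel-assigned subnet; a minimal check that both interfaces ended up in the same network:

systemctl restart docker

ip -4 addr show flannel0

ip -4 addr show docker0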

---

tar zxvf kubernetes-node-linux-amd64.tar.gz

mv kubernetes /data/kubernetes2

mkdir -p /data/kubernetes/{bin,cfg,logs}

mv /data/kubernetes2/node/bin/{kubelet,kube-proxy} /data/kubernetes/bin/

vim /data/kubernetes/cfg/kubelet.kubeconfig

apiVersion: v1
kind: Config
clusters:
- cluster:
    server: http://192.168.xxx.112:8080
  name: local
contexts:
- context:
    cluster: local
  name: local
current-context: local
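If kubectl is also available on the node, the kubeconfig can be syntax-checked before the kubelet ever starts:

kubectl config view --kubeconfig=/data/kubernetes/cfg/kubelet.kubeconfig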

---

vi /data/kubernetes/cfg/kubelet    (--address and --hostname-override should be this node's own IP)

KUBE_LOGTOSTDERR="--logtostderr=false"

KUBE_LOG_DIR="--log-dir=/data/kubernetes/logs"

KUBE_LOG_LEVEL="--v=4"

NODE_ADDRESS="--address=10.6.60.99"

NODE_PORT="--port=10250"

NODE_HOSTNAME="--hostname-override=10.6.60.99"

KUBELET_KUBECONFIG="--kubeconfig=/data/kubernetes/cfg/kubelet.kubeconfig"

KUBE_ALLOW_PRIV="--allow-privileged=true"

KUBELET_DNS_IP="--cluster-dns=10.1.0.2"

KUBELET_DNS_DOMAIN="--cluster-domain=cluster.local"

KUBELET_SWAP="--fail-swap-on=false"

vi /lib/systemd/system/kubelet.service

[Unit]

Description=Kubernetes Kubelet

After=docker.service

Requires=docker.service

[Service]

EnvironmentFile=-/data/kubernetes/cfg/kubelet

ExecStart=/data/kubernetes/bin/kubelet --cgroup-driver=cgroupfs --runtime-cgroups=/systemd/system.slice --kubelet-cgroups=/systemd/system.slice \
        ${KUBE_LOGTOSTDERR} \
        ${KUBE_LOG_DIR} \
        ${KUBE_LOG_LEVEL} \
        ${NODE_ADDRESS} \
        ${NODE_PORT} \
        ${NODE_HOSTNAME} \
        ${KUBELET_KUBECONFIG} \
        ${KUBE_ALLOW_PRIV} \
        ${KUBELET_DNS_IP} \
        ${KUBELET_DNS_DOMAIN} \
        ${KUBELET_SWAP}

Restart=on-failure

KillMode=process

[Install]

WantedBy=multi-user.target

systemctl daemon-reload

systemctl enable kubelet

systemctl restart kubelet

systemctl status kubelet
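If the kubelet fails to register with the master, its journal is the first place to look (file logs also land in /data/kubernetes/logs because of --log-dir):

journalctl -u kubelet --no-pager | tail -n 50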

---

vi /data/kubernetes/cfg/kube-proxy    (--hostname-override should be this node's own IP)

KUBE_LOGTOSTDERR="--logtostderr=false"

KUBE_LOG_DIR="--log-dir=/data/kubernetes/logs"

KUBE_LOG_LEVEL="--v=4"

NODE_HOSTNAME="--hostname-override=192.168.xxx.112"

KUBE_MASTER="--master=http://192.168.xxx.112:8080"

vi /lib/systemd/system/kube-proxy.service

[Unit]

Description=Kubernetes Proxy

After=network.target

[Service]

EnvironmentFile=-/data/kubernetes/cfg/kube-proxy

ExecStart=/data/kubernetes/bin/kube-proxy \
        ${KUBE_LOGTOSTDERR} \
        ${KUBE_LOG_DIR} \
        ${KUBE_LOG_LEVEL} \
        ${NODE_HOSTNAME} \
        ${KUBE_MASTER}

Restart=on-failure

[Install]

WantedBy=multi-user.target

systemctl daemon-reload

systemctl enable kube-proxy

systemctl restart kube-proxy

systemctl status kube-proxy
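kube-proxy problems usually show up in its journal; tailing it is enough to confirm it connected to the apiserver:

journalctl -u kube-proxy --no-pager | tail -n 20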

---

Check from the master whether the nodes have registered: kubectl get node

echo "export PATH=$PATH:/data/kubernetes/bin" >> /etc/profile

source /etc/profile
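If everything is wired up, kubectl get node should list both nodes as Ready, roughly like this (names depend on the --hostname-override values used above):

NAME              STATUS    AGE

192.168.xxx.113   Ready     1m

192.168.xxx.114   Ready     1m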

------------------------------------------- Run on all machines -----------------------------------------------

docker pull 192.168.xxx.112/public/pause-amd64:3.0

docker tag 192.168.xxx.112/public/pause-amd64:3.0 gcr.io/google_containers/pause-amd64:3.0

yum install *rhsm* -y

#docker pull registry.access.redhat.com/rhel7/pod-infrastructure:latest

kubectl create -f dashboard-controller.yaml

kubectl create -f dashboard-service.yaml
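Before opening the URL below it is worth confirming that the dashboard pod actually came up:

kubectl get pods -n kube-system | grep kubernetes-dashboard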

kubernetes-dashboard access URL: http://192.168.xxx.112:8080/api/v1/namespaces/kube-system/services/kubernetes-dashboard/proxy/#!/overview?namespace=default

Troubleshooting notes:

1. Get https://192.168.xxx.112:5000/v1/_ping: Tunnel Connection Failed

Solution:

Edit the daemon.json file: sudo vim /etc/docker/daemon.json

Add: { "insecure-registries":["192.168.xxx.81:5000"] }

2. kubelet does not have ClusterDNS IP configured and cannot create Pod using "ClusterFirst" policy. Falling back to DNSDefault policy.

Fix it on the node (vim /etc/kubernetes/kubelet):

KUBELET_ARGS="--cluster-dns=192.168.xxx.110 --cluster-domain=test.com"

The dashboard cannot be accessed after Kubernetes (k8s) has been deployed:

vim /lib/systemd/system/docker.service

ExecStartPost=/sbin/iptables -I FORWARD -s 0.0.0.0/0 -j ACCEPT
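For the ExecStartPost drop-in to take effect, reload systemd and restart docker; the rule can also be inserted by hand once as an immediate fix:

systemctl daemon-reload

systemctl restart docker.service

iptables -I FORWARD -s 0.0.0.0/0 -j ACCEPT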

# kubelet

# Last line of the kubelet log: error: failed to run Kubelet: failed to create kubelet: misconfiguration: kubelet cgroup driver: "cgroupfs" is different from docker cgroup driver: "systemd"

# vim /lib/systemd/system/docker.service

# Change --exec-opt native.cgroupdriver=systemd to:

# --exec-opt native.cgroupdriver=cgroupfs

# systemctl daemon-reload

# systemctl restart docker.service

# kubelet now runs normally

Failed to get system container stats for "/system.slice/docker.service": failed to get cgroup stats for "/system.slice/docker.service": failed to get conta

Fix: add the cgroup flags to ExecStart in the kubelet unit (system/kubelet.service):

ExecStart=/usr/bin/kubelet --runtime-cgroups=/systemd/system.slice --kubelet-cgroups=/systemd/system.slice

Kubernetes: fixing the /var/run/secrets/kubernetes.io/serviceaccount/token "no such file or directory" problem

kubectl get serviceaccount

NAME      SECRETS
default   0

If it is missing, it needs to be added:

Add the following to the apiserver startup flags:

--admission_control=ServiceAccount

When the apiserver starts it creates a key and certificate of its own (see /var/run/kubernetes/apiserver.crt and apiserver.key).

Then start ./kube-controller-manager with the flag:

--service_account_private_key_file=/var/run/kubernetes/apiserver.key

kubectl get serviceaccount

NAME      SECRETS
default   1

Deploying the Heapster component

Heapster is a container-cluster monitoring and performance-analysis tool with native support for Kubernetes and CoreOS.

Every Kubernetes node runs the Kubernetes monitoring agent cAdvisor, which collects monitoring data (CPU, memory, filesystem, network, uptime) for the host and its containers.

cAdvisor web UI: http://<Node-IP>:4194

Heapster is a collector: it aggregates the cAdvisor data from every node and exports it to a third-party backend (such as InfluxDB).

heapster + influxdb + grafana: heapster collects the metrics, influxdb stores them, and grafana visualizes them.

The official manifests reference the following images:

heapster

heapster-grafana

heapster-influxdb

Official repository:

https://github.com/kubernetes/heapster/tree/master/deploy/kube-config/

Download heapster:


# wget https://codeload.github.com/kubernetes/heapster/tar.gz/v1.5.0-beta.0 -O heapster-1.5.0-beta.tar.gz

# tar -zxvf heapster-1.5.0-beta.tar.gz

# cd heapster-1.5.0-beta.0/deploy/kube-config

# cp rbac/heapster-rbac.yaml influxdb/

# cd influxdb/

# ls

grafana.yaml heapster-rbac.yaml heapster.yaml influxdb.yaml

Replace the image registry and apply the manifests:


# sed -i 's#gcr.io/google_containers#192.168.xxx.110/k8s#g' *.yaml
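A quick grep confirms the image references now point at the private registry before anything is applied:

# grep 'image:' *.yaml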

# kubectl create -f .

deployment "monitoring-grafana" created

service "monitoring-grafana" created

clusterrolebinding "heapster" created

serviceaccount "heapster" created

deployment "heapster" created

service "heapster" created

deployment "monitoring-influxdb" created

service "monitoring-influxdb" created

Download locations for the images used by the heapster installation:

hub.c.163.com/zhijiansd/heapster-amd64:v1.4.0

hub.c.163.com/zhijiansd/heapster-grafana-amd64:v4.4.3

hub.c.163.com/zhijiansd/heapster-influxdb-amd64:v1.3.3

Check the results:


# kubectl get deployments -n kube-system | grep -E 'heapster|monitoring'

heapster              1    1    1    1    1m

monitoring-grafana    1    1    1    1    1m

monitoring-influxdb   1    1    1    1    1m

Check the Pods:

# kubectl get pods -n kube-system | grep -E 'heapster|monitoring' ### list pods

heapster-d7f5dc5bf-k2c5v 1/1 Running 0 2m

monitoring-grafana-98d44cd67-nfmmt 1/1 Running 0 2m

monitoring-influxdb-6b6d749d9c-6q99p 1/1 Running 0 2m

# kubectl get svc -n kube-system | grep -E 'heapster|monitoring' ### list services

heapster ClusterIP 10.254.198.254 80/TCP 2m

monitoring-grafana ClusterIP 10.254.73.182 80/TCP 2m

monitoring-influxdb ClusterIP 10.254.143.75 8086/TCP 2m

# kubectl cluster-info ### view cluster info

Kubernetes master is running at https://192.168.xxx.102:6443

Heapster is running at https://192.168.xxx.102:6443/api/v1/namespaces/kube-system/services/heapster/proxy

KubeDNS is running at https://192.168.xxx.102:6443/api/v1/namespaces/kube-system/services/kube-dns/proxy

monitoring-grafana is running at https://192.168.xxx.102:6443/api/v1/namespaces/kube-system/services/monitoring-grafana/proxy

monitoring-influxdb is running at https://192.168.xxx.102:6443/api/v1/namespaces/kube-system/services/monitoring-influxdb/proxy

Access grafana in a browser:

https://master:6443/api/v1/namespaces/kube-system/services/monitoring-grafana/proxy

=========

Create the Kubernetes certificates:

https://blog.csdn.net/u012066426/article/details/72778902

/etc/sysconfig/docker

Change: OPTIONS='--selinux-enabled=false --log-driver=journald --signature-verification=false --insecure-registry=192.168.xxx.112'

./easyrsa init-pki

./easyrsa --batch "--req-cn=192.168.xxx.112@$(date +%s)" build-ca nopass

./easyrsa --subject-alt-name="IP:192.168.xxx.112,IP:10.1.92.0,IP:10.1.0.1" build-server-full server nopass

cp -a pki/ca.crt pki/private/server.key pki/issued/server.crt /data/kubernetes/cret/
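The SANs baked into the server certificate can be double-checked with openssl before pointing the apiserver at it:

openssl x509 -in /data/kubernetes/cret/server.crt -noout -text | grep -A1 'Subject Alternative Name'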

Found that no fluentd-related Pods were running or even pending!

kubectl get -f fluentd-es-ds.yaml

It turned out my minion nodes did not carry the required label, so apply it with:

kubectl label node k8s-node-1 beta.kubernetes.io/fluentd-ds-ready=true

nodeSelector:
  cluster.local/role: it    # schedule only onto machines labeled it

kubectl delete node 192.168.xxx.112

kubectl get nodes --show-labels

Modify the apiserver configuration file:

========

# Add your own!

#KUBE_API_ARGS=""

KUBE_API_ARGS="--service-node-port-range=20000-65535“

========

Restart the apiserver, then recreate a Service that uses node port 20001 to verify the port range change took effect.
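A minimal NodePort Service sketch for that verification (the names test-nodeport and app: test are placeholders, not from the original notes):

apiVersion: v1
kind: Service
metadata:
  name: test-nodeport    # placeholder name
spec:
  type: NodePort
  selector:
    app: test            # placeholder selector
  ports:
  - port: 80
    nodePort: 20001

kubectl create -f test-nodeport.yaml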

kube-proxy error: kube-proxy[2241]: E0502 15:55:13.889842 2241 conntrack.go:42] conntrack returned error: error looking for path of conntrack: exec: "conntrack": executable file not found in $PATH

Symptom: kubedns starts and runs normally, but Services cannot resolve each other; DNS resolution inside Kubernetes is broken.

Fix: on CentOS, install the conntrack-tools package and then restart the Kubernetes cluster.
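Concretely, on each node (then restart the kubernetes services as noted above):

yum install conntrack-tools -y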
