Kubernetes 1.15.7 binary installation and deployment: from getting started to giving up
Kubernetes installation and deployment
Newer versions of k8s pile on authentication for the sake of security, which makes the setup complex and fiddly, so read the installation steps carefully. Overall it is not that complicated, but the details matter. The binaries can be downloaded directly from GitHub.
Basic information
master: 192.168.7.120
node: 192.168.7.121
Kubernetes version: 1.15.7
Docker version: 1.13.1
flannel version: 0.7.1
etcd version: 3.3.11
Install etcd first
I installed a single-node etcd via yum, serving plain HTTP (no TLS):
# yum install -y etcd
[root@zw-m coredns]# cat /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
EnvironmentFile=-/etc/etcd/etcd.conf
User=etcd
# set GOMAXPROCS to number of processors
ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /usr/bin/etcd --name=\"${ETCD_NAME}\" --data-dir=\"${ETCD_DATA_DIR}\" --listen-client-urls=\"${ETCD_LISTEN_CLIENT_URLS}\""
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
[root@zw-m coredns]# cat /etc/etcd/etcd.conf
ETCD_NAME=default
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.7.120:2379"
Start etcd
# systemctl start etcd
[root@zw-m coredns]# systemctl status etcd
● etcd.service - Etcd Server
Loaded: loaded (/usr/lib/systemd/system/etcd.service; disabled; vendor preset: disabled)
Active: active (running) since Fri 2019-12-27 04:57:51 EST; 3 days ago
Main PID: 10998 (etcd)
CGroup: /system.slice/etcd.service
└─10998 /usr/bin/etcd --name=default --data-dir=/var/lib/etcd/default.etcd --listen-client-urls=http://0.0.0.0:2379
Dec 31 01:00:36 zw-m etcd[10998]: store.index: compact 92660
Dec 31 01:00:36 zw-m etcd[10998]: finished scheduled compaction at 92660 (took 1.845175ms)
Dec 31 01:05:36 zw-m etcd[10998]: store.index: compact 92994
Dec 31 01:05:36 zw-m etcd[10998]: finished scheduled compaction at 92994 (took 1.393403ms)
Dec 31 01:10:36 zw-m etcd[10998]: store.index: compact 93328
Create the network config key in etcd that flannel will use
# etcdctl mk /flannel/network/config '{"Network":"172.17.0.0/16","SubnetMin":"172.17.1.0","SubnetMax":"172.17.254.0"}'
# take a look
[root@zw-m coredns]# etcdctl ls /flannel --recursive
/flannel/network
/flannel/network/subnets
/flannel/network/subnets/172.17.77.0-24
/flannel/network/subnets/172.17.40.0-24
/flannel/network/config
[root@zw-m coredns]# etcdctl get /flannel/network/config
{"Network":"172.17.0.0/16","SubnetMin":"172.17.1.0","SubnetMax":"172.17.254.0"}
Master deployment
Copy the binaries onto the master machine:
[root@zw-m ssl]# ll /usr/bin | grep kube
-rw-r--r-- 1 root root 164571520 Dec 29 23:14 kube-apiserver
-rw-r--r-- 1 root root 116466656 Dec 29 23:14 kube-controller-manager
-rw-r--r-- 1 root root 42993696 Dec 29 23:14 kubectl
-rw-r--r-- 1 root root 119702544 Dec 29 23:14 kubelet
-rw-r--r-- 1 root root 36991584 Dec 29 23:14 kube-proxy
-rw-r--r-- 1 root root 38790240 Dec 29 23:14 kube-scheduler
[root@zw-m ssl]# chmod +x /usr/bin/*
[root@zw-m ssl]# ll /usr/bin | grep kube
-rwxr-xr-x 1 root root 164571520 Dec 29 23:14 kube-apiserver
-rwxr-xr-x 1 root root 116466656 Dec 29 23:14 kube-controller-manager
-rwxr-xr-x 1 root root 42993696 Dec 29 23:14 kubectl
-rwxr-xr-x 1 root root 119702544 Dec 29 23:14 kubelet
-rwxr-xr-x 1 root root 36991584 Dec 29 23:14 kube-proxy
-rwxr-xr-x 1 root root 38790240 Dec 29 23:14 kube-scheduler
# create the configuration directory
[root@zw-m ssl]# mkdir /etc/kubernetes/
Sign the certificates
Install cfssl
$ wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
$ chmod +x cfssl_linux-amd64
$ mv cfssl_linux-amd64 /usr/local/bin/cfssl
$ wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
$ chmod +x cfssljson_linux-amd64
$ mv cfssljson_linux-amd64 /usr/bin/cfssljson
$ wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
$ chmod +x cfssl-certinfo_linux-amd64
$ mv cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo
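A quick sanity check that the three tools landed on the PATH (note cfssl went to /usr/local/bin and the other two to /usr/bin; both directories are normally on PATH):
$ cfssl version
$ which cfssljson cfssl-certinfo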
Edit the certificate configuration. In the hosts list below, 192.168.7.120 is the master address (cert.sh substitutes it for the masterIp placeholder), and 10.254.0.1 is the first IP of the service CIDR (10.254.0.0/16), used by the in-cluster kubernetes service. Note that JSON does not support comments, so the file itself must not carry any inline annotations.
[root@zw-m ssl]# ls
admin-csr.json ca-csr.json clear.sh front-proxy-client-csr.json kube-proxy-csr.json kube-scheduler.json
ca-config.json cert.sh front-proxy-ca-csr.json kube-controller-manager.json kubernetes-csr.json
[root@zw-m ssl]# cat kubernetes-csr.json
{
"CN": "kubernetes",
"hosts": [
"127.0.0.1",
"192.168.7.120", #master地址
"10.254.0.1",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "ShangHai",
"L": "ShangHai",
"O": "k8s",
"OU": "System"
}
]
}
Run the one-shot certificate-signing script (the cfssl "hosts field" warnings can all be ignored). First, a look at the cert.sh script:
[root@zw-m ssl]# cat cert.sh
#!/bin/bash
KUBE_APISERVER="https://$1:6443"
# generate the CA certificate and private key
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
# generate the kubernetes certificate and private key
sed -i "s%masterIp%$1%g" kubernetes-csr.json
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
# generate the admin certificate and private key
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
# generate the front-proxy CA and client certificates
cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare front-proxy-ca
cfssl gencert -ca=front-proxy-ca.pem -ca-key=front-proxy-ca-key.pem -config=ca-config.json -profile=kubernetes front-proxy-client-csr.json | cfssljson -bare front-proxy-client
# generate the kube-controller-manager certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager.json | cfssljson -bare kube-controller-manager
# generate the kube-scheduler certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler.json | cfssljson -bare kube-scheduler
# generate the kube-proxy certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
function kubeproxyConfig()
{
# set the cluster parameters
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=kube-proxy.kubeconfig
# set the client authentication parameters
kubectl config set-credentials kube-proxy \
--client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \
--client-key=/etc/kubernetes/ssl/kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig
# set the context parameters
kubectl config set-context default \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=kube-proxy.kubeconfig
# set the default context
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
}
# generate the bootstrap token and the bootstrap kubeconfig
function kubeletConfig()
{
BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
echo ${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap" >> token.csv
# set the cluster parameters
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=bootstrap.kubeconfig
# set the client authentication parameters
kubectl config set-credentials kubelet-bootstrap \
--token=${BOOTSTRAP_TOKEN} \
--kubeconfig=bootstrap.kubeconfig
# set the context parameters
kubectl config set-context default \
--cluster=kubernetes \
--user=kubelet-bootstrap \
--kubeconfig=bootstrap.kubeconfig
# set the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
}
# create controller-manager.kubeconfig
function controllerConfig()
{
# set the cluster parameters
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=controller-manager.kubeconfig
# set the client authentication parameters
kubectl config set-credentials system:kube-controller-manager \
--client-certificate=/etc/kubernetes/ssl/kube-controller-manager.pem \
--client-key=/etc/kubernetes/ssl/kube-controller-manager-key.pem \
--embed-certs=true \
--kubeconfig=controller-manager.kubeconfig
# set the context parameters
kubectl config set-context system:kube-controller-manager \
--cluster=kubernetes \
--user=system:kube-controller-manager \
--kubeconfig=controller-manager.kubeconfig
# set the default context
kubectl config use-context system:kube-controller-manager \
--kubeconfig=controller-manager.kubeconfig
}
# create kube-scheduler.kubeconfig
function schedulerConfig()
{
# set the cluster parameters
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=kube-scheduler.kubeconfig
# set the client authentication parameters
kubectl config set-credentials kube-scheduler \
--client-certificate=kube-scheduler.pem \
--client-key=kube-scheduler-key.pem \
--embed-certs=true \
--kubeconfig=kube-scheduler.kubeconfig
# set the context parameters
kubectl config set-context kube-scheduler \
--cluster=kubernetes \
--user=kube-scheduler \
--kubeconfig=kube-scheduler.kubeconfig
# set the default context
kubectl config use-context kube-scheduler \
--kubeconfig=kube-scheduler.kubeconfig
}
# distribute the certificates
mkdir -p /etc/kubernetes/ssl
scp *.pem /etc/kubernetes/ssl
# generate the kubeconfig files
kubeproxyConfig
kubeletConfig
controllerConfig
schedulerConfig
scp *.kubeconfig *.csv /etc/kubernetes/
[root@zw-m ssl]# sh cert.sh 192.168.7.120
2019/12/29 23:16:33 [INFO] generating a new CA key and certificate from CSR
2019/12/29 23:16:33 [INFO] generate received request
2019/12/29 23:16:33 [INFO] received CSR
2019/12/29 23:16:33 [INFO] generating key: rsa-2048
2019/12/29 23:16:34 [INFO] encoded CSR
2019/12/29 23:16:34 [INFO] signed certificate with serial number 677780582531351817486211069264293497147575249754
2019/12/29 23:16:34 [INFO] generate received request
2019/12/29 23:16:34 [INFO] received CSR
2019/12/29 23:16:34 [INFO] generating key: rsa-2048
2019/12/29 23:16:34 [INFO] encoded CSR
2019/12/29 23:16:34 [INFO] signed certificate with serial number 557986097063909595065261797629500040517436716191
2019/12/29 23:16:34 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
2019/12/29 23:16:34 [INFO] generate received request
2019/12/29 23:16:34 [INFO] received CSR
2019/12/29 23:16:34 [INFO] generating key: rsa-2048
2019/12/29 23:16:35 [INFO] encoded CSR
2019/12/29 23:16:35 [INFO] signed certificate with serial number 501842333810573709684748338703569380474603089899
2019/12/29 23:16:35 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
2019/12/29 23:16:35 [INFO] generating a new CA key and certificate from CSR
2019/12/29 23:16:35 [INFO] generate received request
2019/12/29 23:16:35 [INFO] received CSR
2019/12/29 23:16:35 [INFO] generating key: rsa-2048
2019/12/29 23:16:35 [INFO] encoded CSR
2019/12/29 23:16:35 [INFO] signed certificate with serial number 35907834505483465635003820328689908842781384357
2019/12/29 23:16:35 [INFO] generate received request
2019/12/29 23:16:35 [INFO] received CSR
2019/12/29 23:16:35 [INFO] generating key: rsa-2048
2019/12/29 23:16:36 [INFO] encoded CSR
2019/12/29 23:16:36 [INFO] signed certificate with serial number 573086425276409361516892816568817857097136162505
2019/12/29 23:16:36 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
2019/12/29 23:16:36 [INFO] generate received request
2019/12/29 23:16:36 [INFO] received CSR
2019/12/29 23:16:36 [INFO] generating key: rsa-2048
2019/12/29 23:16:36 [INFO] encoded CSR
2019/12/29 23:16:36 [INFO] signed certificate with serial number 113488023572397931031569335910777320367782909615
2019/12/29 23:16:36 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
2019/12/29 23:16:36 [INFO] generate received request
2019/12/29 23:16:36 [INFO] received CSR
2019/12/29 23:16:36 [INFO] generating key: rsa-2048
2019/12/29 23:16:36 [INFO] encoded CSR
2019/12/29 23:16:36 [INFO] signed certificate with serial number 111893924363735307869120674051318016000539245824
2019/12/29 23:16:36 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
2019/12/29 23:16:36 [INFO] generate received request
2019/12/29 23:16:36 [INFO] received CSR
2019/12/29 23:16:36 [INFO] generating key: rsa-2048
2019/12/29 23:16:37 [INFO] encoded CSR
2019/12/29 23:16:37 [INFO] signed certificate with serial number 50159815023304060181895284557719414515726552316
2019/12/29 23:16:37 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
Cluster "kubernetes" set.
User "kube-proxy" set.
Context "default" created.
Switched to context "default".
Cluster "kubernetes" set.
User "kubelet-bootstrap" set.
Context "default" created.
Switched to context "default".
Cluster "kubernetes" set.
User "system:kube-controller-manager" set.
Context "system:kube-controller-manager" created.
Switched to context "system:kube-controller-manager".
Cluster "kubernetes" set.
User "kube-scheduler" set.
Context "kube-scheduler" created.
Switched to context "kube-scheduler".
# the files produced:
[root@zw-m ssl]# ls
admin.csr ca-csr.json front-proxy-ca-csr.json kube-controller-manager.csr kube-proxy.kubeconfig kube-scheduler.json
admin-csr.json ca-key.pem front-proxy-ca-key.pem kube-controller-manager.json kube-proxy.pem kube-scheduler-key.pem
admin-key.pem ca.pem front-proxy-ca.pem kube-controller-manager-key.pem kubernetes.csr kube-scheduler.kubeconfig
admin.pem cert.sh front-proxy-client.csr kube-controller-manager.pem kubernetes-csr.json kube-scheduler.pem
bootstrap.kubeconfig clear.sh front-proxy-client-csr.json kube-proxy.csr kubernetes-key.pem token.csv
ca-config.json controller-manager.kubeconfig front-proxy-client-key.pem kube-proxy-csr.json kubernetes.pem
ca.csr front-proxy-ca.csr front-proxy-client.pem kube-proxy-key.pem kube-scheduler.csr
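Before wiring these into the services, it is worth verifying that the apiserver certificate actually carries the expected SANs, using the cfssl-certinfo tool installed earlier (or openssl):
$ cfssl-certinfo -cert kubernetes.pem
$ openssl x509 -in kubernetes.pem -noout -text | grep -A1 'Subject Alternative Name'
The hosts from kubernetes-csr.json (127.0.0.1, 192.168.7.120, 10.254.0.1 and the kubernetes.default... names) should all be listed.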
Create the kubectl kubeconfig file
$ export KUBE_APISERVER="https://192.168.7.120:6443"
$ kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER}
$ kubectl config set-credentials admin \
--client-certificate=/etc/kubernetes/ssl/admin.pem \
--embed-certs=true \
--client-key=/etc/kubernetes/ssl/admin-key.pem
$ kubectl config set-context kubernetes \
--cluster=kubernetes \
--user=admin
$ kubectl config use-context kubernetes
# check: the resulting config file lives under the /root/.kube directory
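A quick way to confirm kubectl is now pointed at the right cluster and user:
$ kubectl config current-context   # should print: kubernetes
$ kubectl config view --minify     # shows the cluster/user behind the current context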
Create role bindings
Component-to-apiserver calls need RBAC roles bound to their users (run these once the apiserver is up). Note that the third binding below grants cluster-admin to system:anonymous, which effectively gives unauthenticated requests full control of the cluster; tolerable in a throwaway lab, dangerous anywhere else.
kubectl create clusterrolebinding controller-manager-admin-binding --clusterrole=cluster-admin --user=system:kube-controller-manager
kubectl create clusterrolebinding kube-scheduler-admin-binding --clusterrole=cluster-admin --user=kube-scheduler
kubectl create clusterrolebinding system:anonymous --clusterrole=cluster-admin --user=system:anonymous
(the --write-config-to flag can be used to dump a component's default configuration)
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=cluster-admin --user=kubelet-bootstrap
yum install -y ipvsadm # only needed if kube-proxy will run in ipvs mode
Edit the configuration files
### kube-apiserver.service
[root@zw-m system]# cat kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
After=etcd.service
[Service]
EnvironmentFile=-/etc/kubernetes/apiserver
User=root
ExecStart=/usr/bin/kube-apiserver \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBE_ETCD_SERVERS \
$KUBELET_PORT \
$KUBE_ALLOW_PRIV \
$KUBE_SERVICE_ADDRESSES \
$KUBE_ADMISSION_CONTROL \
$KUBE_API_ARGS
Restart=on-failure
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
[root@zw-m kubernetes]# cat apiserver
KUBELET_PORT="--secure-port=6443 --insecure-bind-address=192.168.7.120"
KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.7.120:2379"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
KUBE_ADMISSION_CONTROL="--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
KUBE_API_ARGS="--endpoint-reconciler-type=master-count --bind-address=192.168.7.120"
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=true --authorization-mode=RBAC --token-auth-file=/etc/kubernetes/token.csv --kubelet-https=true --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem --client-ca-file=/etc/kubernetes/ssl/ca.pem --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem --apiserver-count=1 --audit-log-maxage=30 --audit-log-maxbackup=3 --audit-log-maxsize=100 --audit-log-path=/var/lib/audit.log --event-ttl=1h --runtime-config=api/all=true --requestheader-client-ca-file=/etc/kubernetes/ssl/front-proxy-ca.pem --requestheader-allowed-names=front-proxy-client --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --proxy-client-cert-file=/etc/kubernetes/ssl/front-proxy-client.pem --proxy-client-key-file=/etc/kubernetes/ssl/front-proxy-client-key.pem --enable-aggregator-routing=true "
### kube-controller-manager.service
[root@zw-m system]# cat kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
EnvironmentFile=-/etc/kubernetes/controller-manager
User=root
ExecStart=/usr/bin/kube-controller-manager \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBE_MASTER \
$KUBE_CONTROLLER_MANAGER_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
[root@zw-m kubernetes]# cat controller-manager
KUBE_CONTROLLER_MANAGER_ARGS=" --bind-address=127.0.0.1 --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig --service-cluster-ip-range=10.254.0.0/16 --cluster-name=kubernetes --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem --root-ca-file=/etc/kubernetes/ssl/ca.pem --leader-elect=true"
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
### kube-scheduler.service
[root@zw-m system]# cat kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler Plugin
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
EnvironmentFile=-/etc/kubernetes/scheduler
User=root
ExecStart=/usr/bin/kube-scheduler \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBE_MASTER \
$KUBE_SCHEDULER_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
[root@zw-m kubernetes]# cat scheduler
KUBE_SCHEDULER_ARGS="--leader-elect=true --address=127.0.0.1"
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_MASTER="--kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig"
Start the services
for SERVICE in kube-apiserver kube-controller-manager kube-scheduler; do
systemctl enable $SERVICE
systemctl restart $SERVICE
systemctl status $SERVICE
done
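Once all three services are active, the control plane can be smoke-tested from the master (kubectl was already configured above):
$ kubectl get cs    # scheduler, controller-manager and etcd-0 should all report Healthy
$ kubectl get svc   # the kubernetes service should sit at 10.254.0.1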
Node installation
Install docker and flannel, both via yum; I won't go into the details.
Copy the binaries
[root@zw-n1 bin]# ll -th | grep kube
-rwxr-xr-x 1 root root 115M Dec 30 02:16 kubelet
-rwxr-xr-x 1 root root 42M Dec 30 02:16 kubectl
-rwxr-xr-x 1 root root 36M Dec 30 02:16 kube-proxy
[root@zw-n1 bin]# pwd
/usr/bin
[root@zw-n1 bin]# mkdir /var/lib/kubelet
Copy the systemd service files directly
# kubelet.service
[root@zw-n1 bin]# cat /usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service
[Service]
User=root
WorkingDirectory=/var/lib/kubelet
EnvironmentFile=-/etc/kubernetes/kubelet
ExecStart=/usr/bin/kubelet \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBELET_HOSTNAME \
$KUBE_ALLOW_PRIV \
$KUBELET_POD_INFRA_CONTAINER \
$KUBELET_ARGS
Restart=on-failure
[Install]
WantedBy=multi-user.target
# kube-proxy.service
[root@zw-n1 bin]# cat /usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/proxy
ExecStart=/usr/bin/kube-proxy \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBE_MASTER \
$KUBE_PROXY_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
Configuration files
# the files needed
[root@zw-n1 kubernetes]# ls
bootstrap.kubeconfig kubelet kubelet.yaml kube-proxy.kubeconfig kube-proxy.yaml proxy
# create a directory for the certs: mkdir /etc/kubernetes/ssl
# kube-proxy configuration
[root@zw-n1 kubernetes]# cat proxy
KUBE_PROXY_ARGS="--config=/etc/kubernetes/kube-proxy.yaml "
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
[root@zw-n1 kubernetes]# cat /etc/kubernetes/kube-proxy.yaml
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 192.168.7.121 # this node's IP
clientConnection:
acceptContentTypes: ""
burst: 10
contentType: application/vnd.kubernetes.protobuf
kubeconfig: "/etc/kubernetes/kube-proxy.kubeconfig"
qps: 5
clusterCIDR: "10.254.0.0/16"
configSyncPeriod: 15m0s
conntrack:
max: 0
maxPerCore: 32768
min: 131072
tcpCloseWaitTimeout: 1h0m0s
tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: "zw-n1" # this node's hostname
iptables:
masqueradeAll: false
masqueradeBit: 14
minSyncPeriod: 0s
syncPeriod: 30s
ipvs:
excludeCIDRs: null
minSyncPeriod: 0s
scheduler: ""
syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 192.168.7.121:10249 # this node's IP:10249
mode: "iptables"
nodePortAddresses: []
oomScoreAdj: -999
portRange: "30000-32767"
resourceContainer: /kube-proxy
udpIdleTimeout: 250ms
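With mode: "iptables" set above, kube-proxy programs iptables rules rather than IPVS. Once kube-proxy is running (after the service-start step below), the active mode can be confirmed via the metrics endpoint configured in metricsBindAddress:
$ curl -s http://192.168.7.121:10249/proxyMode
# should print: iptables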
# kubelet
[root@zw-n1 kubernetes]# cat kubelet
KUBELET_HOSTNAME="--hostname-override=zw-n1"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry:5000/pause-amd64:3.0"
KUBELET_ARGS="--config=/etc/kubernetes/kubelet.yaml --cert-dir=/etc/kubernetes/ssl --bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig --kubeconfig=/etc/kubernetes/kubelet.kubeconfig --runtime-cgroups=/systemd/system.slice"
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
# pod infrastructure container
KUBE_ALLOW_PRIV=""
[root@zw-n1 kubernetes]# cat kubelet.yaml
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 192.168.7.121
port: 10250
readOnlyPort: 10255
clusterDNS:
- 10.254.10.20
clusterDomain: cluster.local
cgroupDriver: cgroupfs
kubeletCgroups: /systemd/system.slice
failSwapOn: false
authentication:
webhook:
enabled: false
cacheTTL: "2m0s"
anonymous:
enabled: true
# the remaining files (kubelet.kubeconfig and the certificates under ssl/) are generated automatically during TLS bootstrap
Start the services
for SERVICE in kube-proxy kubelet flanneld docker; do
systemctl enable $SERVICE
systemctl restart $SERVICE
systemctl status $SERVICE
done
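A quick check that everything on the node came up, and that the kubelet is attempting its TLS bootstrap:
$ systemctl is-active docker flanneld kubelet kube-proxy
$ journalctl -u kubelet -f   # watch the kubelet submit its bootstrap CSR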
Approve the node
### first fetch the CSR
[root@zw-m kubernetes]# kubectl get csr
NAME AGE REQUESTOR CONDITION
node-csr-bH36fnpMwIvnZssFMp_Derm4dhW38OtJTkieRgubuQA 8m1s kubelet-bootstrap Approved,Issued
[root@zw-m kubernetes]# kubectl certificate approve node-csr-bH36fnpMwIvnZssFMp_Derm4dhW38OtJTkieRgubuQA
certificatesigningrequest.certificates.k8s.io/node-csr-bH36fnpMwIvnZssFMp_Derm4dhW38OtJTkieRgubuQA approved
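Once the CSR is approved, the kubelet on the node automatically writes its client certificate into the --cert-dir and generates /etc/kubernetes/kubelet.kubeconfig; a quick look on the node (exact filenames may vary by version):
$ ls /etc/kubernetes/ssl                # a kubelet-client certificate should appear here
$ ls /etc/kubernetes/kubelet.kubeconfig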
Create the role binding
kubectl create clusterrolebinding system:node:zw-n1 --clusterrole=cluster-admin --user=system:node:zw-n1
Note: the node also needs ipset installed:
yum install -y ipset
Then, on the master, list the nodes:
[root@zw-m ssl]# kubectl get node
NAME STATUS ROLES AGE VERSION
zw-n1 Ready <none> 21h v1.15.7
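The ROLES column shows <none> because a binary-installed node gets no role label by default; it is purely cosmetic, but if you want it to read like a kubeadm node you can set the label yourself:
$ kubectl label node zw-n1 node-role.kubernetes.io/node=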
CoreDNS service
[root@zw-m coredns]# cat coredns.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
data:
Corefile: |
.:53 {
errors
health
kubernetes cluster.local 10.254.0.0/16 {
pods insecure
upstream
fallthrough in-addr.arpa ip6.arpa
}
prometheus :9153
proxy . /etc/resolv.conf
cache 30
reload
loadbalance
}
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/name: "CoreDNS"
spec:
replicas: 2
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
spec:
serviceAccountName: coredns
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
containers:
- name: coredns
image: coredns/coredns:1.3.1
imagePullPolicy: IfNotPresent
args: [ "-conf", "/etc/coredns/Corefile" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
readOnly: true
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
dnsPolicy: Default
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
---
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: 10.254.10.20
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
This uses the officially recommended CoreDNS version. Run kubectl get to check:
[root@zw-m coredns]# kubectl get po --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-775f967869-mh4wc 1/1 Running 0 20h
kube-system coredns-775f967869-n95cd 1/1 Running 0 20h
kube-system kubernetes-dashboard-69c485b85c-h27qs 1/1 Running 0 20h
[root@zw-m coredns]# kubectl cluster-info
Kubernetes master is running at https://192.168.7.120:6443
CoreDNS is running at https://192.168.7.120:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
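Finally, a quick end-to-end check that cluster DNS resolves (this assumes the node can pull busybox:1.28; the environment here uses a private registry for the pause image, so substitute an image you can pull):
$ kubectl run dns-test --image=busybox:1.28 --rm -it --restart=Never -- nslookup kubernetes.default
# it should resolve to 10.254.0.1 via the kube-dns service at 10.254.10.20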