k8s + docker in practice (long read)
Source: https://blog.csdn.net/qq_37175369/article/details/79878834
All files used in this article are in this archive:
Link: https://pan.baidu.com/s/1ib7pUGtEDp_DqsuO5jOrAA  password: vvtx
This article is based on Hek_watermelon's blog, with fixes for several deployment problems encountered along the way: https://blog.csdn.net/hekanhyde/article/details/78595236
Let's begin.
Install Docker
CentOS 6
yum install docker-engine-1.7.1-1.el6.x86_64.rpm
CentOS 7
First remove any old Docker packages:
yum remove docker docker-common docker-selinux docker-engine -y
Configure the official yum repo:
yum install -y yum-utils device-mapper-persistent-data lvm2
Since the official source is hard to reach from China, use Alibaba's mirror:
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum-config-manager --enable docker-ce-edge
yum-config-manager --enable docker-ce-testing
yum-config-manager --disable docker-ce-edge
yum erase docker-engine-selinux -y
yum makecache fast
Install docker-ce:
yum install docker-ce -y
Install whatever dependencies yum reports as missing. If the host cannot reach the Internet, configure a yum proxy: drop docker-ce.repo into /etc/yum.repos.d and point the proxy at a server with external access.
The steps below are the same for CentOS 6 and 7.
Because Docker Hub is hard to reach directly, edit /etc/docker/daemon.json, otherwise image pulls will fail.
Add:
{
"registry-mirrors":["https://registry.docker-cn.com"]
}
service docker start
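To confirm the mirror is active, a quick check; the exact docker info layout varies by Docker version:
service docker restart
docker info | grep -A 1 'Registry Mirrors'
# Registry Mirrors:
#  https://registry.docker-cn.com/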
cfssl
Install the cfssl PKI toolkit:
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
chmod +x cfssl_linux-amd64
mv cfssl_linux-amd64 /usr/local/bin/cfssl
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
chmod +x cfssljson_linux-amd64
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl-certinfo_linux-amd64
mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
export PATH=/usr/local/bin:$PATH
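A quick sanity check that all three binaries are in place (version output varies by build):
cfssl version
which cfssl cfssljson cfssl-certinfo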
Create the etcd certificates and keys
Create the etcd root CA CSR:
mkdir ~/etcd_ssl ~/kubernets_ssl
cd ~/etcd_ssl/
cat > etcd-root-ca-csr.json << EOF
{
"key": {
"algo": "rsa",
"size": 4096
},
"names": [
{
"O": "etcd",
"OU": "etcd Security",
"L": "Beijing",
"ST": "Beijing",
"C": "CN"
}
],
"CN": "etcd-root-ca"
}
EOF
Create the etcd cluster signing config:
cat > etcd-gencert.json << EOF
{
"signing": {
"default": {
"expiry": "8760h"
},
"profiles": {
"etcd": {
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
],
"expiry": "8760h"
}
}
}
}
EOF
Create the etcd certificate signing request (CSR); replace the three node IPs below with your own:
cat > etcd-csr.json << EOF
{
"key": {
"algo": "rsa",
"size": 4096
},
"names": [
{
"O": "etcd",
"OU": "etcd Security",
"L": "Beijing",
"ST": "Beijing",
"C": "CN"
}
],
"CN": "etcd",
"hosts": [
"127.0.0.1",
"localhost",
"172.16.68.83", //此三行替换需安装k8s集群IP地址
"172.16.68.85",
"172.16.68.86"
]
}
EOF
"hosts":表明指定授权使用该证书的 etcd 节点 IP,如果只写127.0.0.1,和本机网卡IP,则需要在3台etcd节点上分别进行证书签名请求,本次为了方便将所有节点的IP都写入。后续只需要将证书进行复制即可
生成etcd证书
cfssl gencert --initca=true etcd-root-ca-csr.json \
| cfssljson --bare etcd-root-ca
Sign the etcd certificate against that CA:
cfssl gencert --ca etcd-root-ca.pem \
--ca-key etcd-root-ca-key.pem \
--config etcd-gencert.json \
-profile=etcd etcd-csr.json | cfssljson --bare etcd
Remove the .csr and .json files:
rm *.csr *.json
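Optionally, before copying the certs around, confirm that the signed certificate really carries all the node IPs; cfssl-certinfo (installed above) prints the certificate as JSON, with the authorized hosts under a sans field:
cfssl-certinfo -cert etcd.pem | grep -A 6 '"sans"'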
Create the Kubernetes certificates and keys
Create the Kubernetes root CA CSR:
cd ~/kubernets_ssl/
cat > k8s-root-ca-csr.json << EOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 4096
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
Create the signing config used for the Kubernetes certificates:
cat > k8s-gencert.json << EOF
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"kubernetes": {
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
],
"expiry": "87600h"
}
}
}
}
EOF
Create the kube-apiserver certificate signing request (CSR); replace the three node IPs below with your own:
cat > kubernetes-csr.json << EOF
{
"CN": "kubernetes",
"hosts": [
"127.0.0.1",
"10.254.0.1",
"172.16.68.83", //下三行换成自己IP
"172.16.68.85",
"172.18.68.86",
"localhost",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "k8s",
"OU":"System"
}
]
}
EOF
Generate the certificates used by kube-apiserver. First create the Kubernetes root CA:
cfssl gencert --initca=true k8s-root-ca-csr.json \
| cfssljson --bare k8s-root-ca
This produces k8s-root-ca.csr, k8s-root-ca.pem and k8s-root-ca-key.pem. Then sign the kube-apiserver certificate:
cfssl gencert --ca=k8s-root-ca.pem \
--ca-key=k8s-root-ca-key.pem \
--config k8s-gencert.json \
--profile kubernetes kubernetes-csr.json \
| cfssljson --bare kubernetes
Create the admin certificate signing request (CSR), used by kubectl:
cat > admin-csr.json << EOF
{
"CN": "admin",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "system:masters",
"OU": "System"
}
]
}
EOF
Generate the admin certificate:
cfssl gencert --ca=k8s-root-ca.pem \
--ca-key=k8s-root-ca-key.pem \
--config k8s-gencert.json \
--profile kubernetes admin-csr.json \
| cfssljson --bare admin
Create the kube-proxy certificate signing request (CSR):
cat > kube-proxy-csr.json << EOF
{
"CN": "system:kube-proxy",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
Generate the kube-proxy certificate:
cfssl gencert --ca=k8s-root-ca.pem \
--ca-key=k8s-root-ca-key.pem \
--config k8s-gencert.json \
--profile kubernetes kube-proxy-csr.json \
| cfssljson --bare kube-proxy
rm *.csr *.json
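At this point only the certificate pairs should remain in ~/kubernets_ssl, roughly:
ls ~/kubernets_ssl/
# admin-key.pem admin.pem k8s-root-ca-key.pem k8s-root-ca.pem
# kube-proxy-key.pem kube-proxy.pem kubernetes-key.pem kubernetes.pem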
Everything below assumes CentOS 7.
Build the etcd cluster
Install etcd:
wget https://github.com/coreos/etcd/releases/download/v3.1.5/etcd-v3.1.5-linux-amd64.tar.gz
tar -xvf etcd-v3.1.5-linux-amd64.tar.gz
mv etcd-v3.1.5-linux-amd64/etcd* /usr/local/bin
cat > /usr/lib/systemd/system/etcd.service << 'EOF'
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
EnvironmentFile=-/etc/etcd/etcd.conf
User=etcd
# set GOMAXPROCS to number of processors
ExecStart=/bin/bash -c"GOMAXPROCS=$(nproc) /usr/local/bin/etcd --name=\"${ETCD_NAME}\"--data-dir=\"${ETCD_DATA_DIR}\" --listen-client-urls=\"${ETCD_LISTEN_CLIENT_URLS}\""
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
Alternatively, just yum install etcd; note that yum installs etcd under /usr/bin, so the ExecStart path in etcd.service must point to /usr/bin as well.
Configure the etcd environment variables
vi /etc/etcd/etcd.conf
Example: the config files of two of the servers are shown below; compare them to see what changes between nodes, then adapt the IPs to your own.
172.16.68.83: etcd.conf
# [member]
ETCD_NAME=cluster1
ETCD_DATA_DIR="/var/lib/etcd/cluster1.etcd"
ETCD_WAL_DIR="/var/lib/etcd/wal"
ETCD_SNAPSHOT_COUNT="100"
ETCD_HEARTBEAT_INTERVAL="100"
ETCD_ELECTION_TIMEOUT="1000"
ETCD_LISTEN_PEER_URLS="https://172.16.68.83:2380"
ETCD_LISTEN_CLIENT_URLS="https://172.16.68.83:2379,http://127.0.0.1:2379"
ETCD_MAX_SNAPSHOTS="5"
ETCD_MAX_WALS="5"
#ETCD_CORS=""
# [cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://172.16.68.83:2380"
# if you use different ETCD_NAME (e.g. test), set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
ETCD_INITIAL_CLUSTER="cluster1=https://172.16.68.83:2380,cluster2=https://172.16.68.85:2380,cluster3=https://172.16.68.86:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://172.16.68.83:2379"
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_SRV=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
#ETCD_STRICT_RECONFIG_CHECK="false"
#ETCD_AUTO_COMPACTION_RETENTION="0"
# [proxy]
#ETCD_PROXY="off"
#ETCD_PROXY_FAILURE_WAIT="5000"
#ETCD_PROXY_REFRESH_INTERVAL="30000"
#ETCD_PROXY_DIAL_TIMEOUT="1000"
#ETCD_PROXY_WRITE_TIMEOUT="5000"
#ETCD_PROXY_READ_TIMEOUT="0"
# [security]
ETCD_CERT_FILE="/etc/etcd/ssl/etcd.pem"
ETCD_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
ETCD_CLIENT_CERT_AUTH="true"
ETCD_TRUSTED_CA_FILE="/etc/etcd/ssl/etcd-root-ca.pem"
ETCD_AUTO_TLS="true"
ETCD_PEER_CERT_FILE="/etc/etcd/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
ETCD_PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_TRUSTED_CA_FILE="/etc/etcd/ssl/etcd-root-ca.pem"
ETCD_PEER_AUTO_TLS="true"
# [logging]
#ETCD_DEBUG="false"
# examples for -log-package-levels etcdserver=WARNING,security=DEBUG
#ETCD_LOG_PACKAGE_LEVELS=""
172.16.68.85: etcd.conf
# [member]
ETCD_NAME=cluster2
ETCD_DATA_DIR="/var/lib/etcd/cluster2.etcd"
ETCD_WAL_DIR="/var/lib/etcd/wal"
ETCD_SNAPSHOT_COUNT="100"
ETCD_HEARTBEAT_INTERVAL="100"
ETCD_ELECTION_TIMEOUT="1000"
ETCD_LISTEN_PEER_URLS="https://172.16.68.85:2380"
ETCD_LISTEN_CLIENT_URLS="https://172.16.68.85:2379,http://127.0.0.1:2379"
ETCD_MAX_SNAPSHOTS="5"
ETCD_MAX_WALS="5"
#ETCD_CORS=""
# [cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://172.16.68.85:2380"
# if you use different ETCD_NAME (e.g. test), set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
ETCD_INITIAL_CLUSTER="cluster1=https://172.16.68.83:2380,cluster2=https://172.16.68.85:2380,cluster3=https://172.16.68.86:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://172.16.68.85:2379"
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_SRV=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
#ETCD_STRICT_RECONFIG_CHECK="false"
#ETCD_AUTO_COMPACTION_RETENTION="0"
# [proxy]
#ETCD_PROXY="off"
#ETCD_PROXY_FAILURE_WAIT="5000"
#ETCD_PROXY_REFRESH_INTERVAL="30000"
#ETCD_PROXY_DIAL_TIMEOUT="1000"
#ETCD_PROXY_WRITE_TIMEOUT="5000"
#ETCD_PROXY_READ_TIMEOUT="0"
# [security]
ETCD_CERT_FILE="/etc/etcd/ssl/etcd.pem"
ETCD_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
ETCD_CLIENT_CERT_AUTH="true"
ETCD_TRUSTED_CA_FILE="/etc/etcd/ssl/etcd-root-ca.pem"
ETCD_AUTO_TLS="true"
ETCD_PEER_CERT_FILE="/etc/etcd/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
ETCD_PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_TRUSTED_CA_FILE="/etc/etcd/ssl/etcd-root-ca.pem"
ETCD_PEER_AUTO_TLS="true"
# [logging]
#ETCD_DEBUG="false"
# examples for -log-package-levels etcdserver=WARNING,security=DEBUG
#ETCD_LOG_PACKAGE_LEVELS=""
Key settings:
ETCD_NAME: the etcd member name; in a static cluster it must match the name used in ETCD_INITIAL_CLUSTER.
ETCD_INITIAL_CLUSTER_STATE: new creates a new cluster; change it to existing when joining an already running etcd cluster.
ETCD_DATA_DIR: where etcd stores its member/db data.
ETCD_CLIENT_CERT_AUTH, ETCD_TRUSTED_CA_FILE, ETCD_CERT_FILE, ETCD_KEY_FILE, etc.: the certificates etcd needs for TLS; point them at the ones created earlier.
This configuration file must be present on every etcd node.
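The unit file runs etcd as the etcd user with /var/lib/etcd as its working directory, and the config expects the certs under /etc/etcd/ssl; neither exists yet on a fresh host. A minimal preparation sketch, assuming the same 83/85/86 addresses and the certs generated earlier in ~/etcd_ssl (skip the useradd if you installed etcd via yum, which creates the user for you):
cd ~/etcd_ssl
for IP in 83 85 86; do
ssh root@172.16.68.$IP "useradd -r -s /sbin/nologin etcd; mkdir -p /etc/etcd/ssl /var/lib/etcd"
scp etcd*.pem root@172.16.68.$IP:/etc/etcd/ssl
ssh root@172.16.68.$IP "chown -R etcd:etcd /etc/etcd/ssl /var/lib/etcd"
done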
Then run on every etcd node:
systemctl daemon-reload
systemctl start etcd
systemctl enable etcd
Check node health:
export ETCDCTL_API=3
etcdctl --cacert=/etc/etcd/ssl/etcd-root-ca.pem \
--cert=/etc/etcd/ssl/etcd.pem \
--key=/etc/etcd/ssl/etcd-key.pem \
--endpoints=https://172.16.68.83:2379,https://172.16.68.85:2379,https://172.16.68.86:2379 \
endpoint health
https://172.16.68.83:2379 is healthy: successfully committed proposal: took = 2.016793ms
https://172.16.68.85:2379 is healthy: successfully committed proposal: took = 2.005839ms
https://172.16.68.86:2379 is healthy: successfully committed proposal: took = 1.167565ms
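Listing the members confirms the cluster actually formed (ETCDCTL_API=3 still exported; the -w table flag is supported by etcd v3's etcdctl):
etcdctl --cacert=/etc/etcd/ssl/etcd-root-ca.pem \
--cert=/etc/etcd/ssl/etcd.pem \
--key=/etc/etcd/ssl/etcd-key.pem \
--endpoints=https://172.16.68.83:2379 \
-w table member list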
Install the kubectl management tool
Copy kubernetes-server-linux-amd64.tar.gz to one of the servers:
tar -zxvf kubernetes-server-linux-amd64.tar.gz
cd kubernetes
tar -xzvf kubernetes-src.tar.gz
cp -r server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler,kubectl,kube-proxy,kubelet} /usr/local/bin/
Distribute the Kubernetes certificates:
cd ~/kubernets_ssl/
for IP in 83 85 86; do
ssh root@172.16.68.$IP mkdir -p /etc/kubernetes/ssl
scp *.pem root@172.16.68.$IP:/etc/kubernetes/ssl
ssh root@172.16.68.$IP chown -R kube:kube /etc/kubernetes/ssl
done
Replace the IPs with your own.
Generate the kubectl kubeconfig file
Run on each master:
# Set cluster parameters - embeds the CA cert into ~/.kube/config
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/ssl/k8s-root-ca.pem \
--embed-certs=true \
--server=https://172.16.68.83:6443   # change this to the local master's IP
# Set client credentials - use the admin cert pair created earlier
kubectl config set-credentials admin \
--client-certificate=/etc/kubernetes/ssl/admin.pem \
--embed-certs=true \
--client-key=/etc/kubernetes/ssl/admin-key.pem
# Set context parameters
kubectl config set-context kubernetes \
--cluster=kubernetes \
--user=admin
# Set the default context
kubectl config use-context kubernetes
Inspect the generated ~/.kube/config:
cat ~/.kube/config
apiVersion: v1
clusters:
- cluster:
certificate-authority-data:...6VjV4dUFBZ3RQNVA0ZDVRY0wyVmF5KytJVm8rRGpPL2NxMlBCMDhEOWl2cHhvTlNDREhMVUpkMWMKSzVzV1ptY21CbTZVejdNTkxLZHBQNTNpR1ZqSFg3ZFpRbzVZd1R4cEZHNHMrdHpEYWRUTnVyeXpJa2d5cStDYgpxdWUzdmVpR0tGU0IxKzZkMmZCT2ZuRko3K0hxRWZaZDl5VitucTF2TlFOT042SXRIclJSUlBMTkljUWFPTmorCjI0dzZIdGpQeFA0b2wxeC8wcG1BNGJUSkd1aXBIUTAvbGJrZkcyRVpnK2UzcFE9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
server: https://172.16.68.83:6443
name: kubernetes
contexts:
- context:
cluster: kubernetes
user: admin
name: kubernetes
current-context: kubernetes
kind: Config
preferences: {}
users:
- name: admin
user:
as-user-extra: {}
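Before moving on, a quick check that the context is wired up; both commands only read the local kubeconfig, so they work even though the apiserver is not running yet:
kubectl config current-context
# kubernetes
kubectl config view --minify    # prints only the active context's settings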
kubectl is now set up.
Master setup
The kube-apiserver service unit:
/usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Service
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
After=etcd.service
[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/apiserver
ExecStart=/usr/local/bin/kube-apiserver \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBE_ETCD_SERVERS \
$KUBE_API_ADDRESS \
$KUBE_API_PORT \
$KUBELET_PORT \
$KUBE_ALLOW_PRIV \
$KUBE_SERVICE_ADDRESSES \
$KUBE_ADMISSION_CONTROL \
$KUBE_API_ARGS
Restart=on-failure
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
/etc/kubernetes/config
###
# kubernetes system config
#
# The following values are used to configure various aspects of all
# kubernetes services, including
#
# kube-apiserver.service
# kube-controller-manager.service
# kube-scheduler.service
# kubelet.service
# kube-proxy.service
# logging to stderr means we get it in the systemd journal
KUBE_LOGTOSTDERR="--logtostderr=true"
# journal message level, 0 is debug
KUBE_LOG_LEVEL="--v=2"
# Should this cluster be allowed to runprivileged docker containers
KUBE_ALLOW_PRIV="--allow-privileged=true"
# How the controller-manager, scheduler,and proxy find the apiserver
KUBE_MASTER="--master=http://127.0.0.1:8080"
Two example apiserver files are shown for comparison.
Node1: /etc/kubernetes/apiserver
###
# kubernetes system config
#
# The following values are used to configure the kube-apiserver
#
# The address on the local server to listen to.
KUBE_API_ADDRESS="--advertise-address=172.16.68.83 --insecure-bind-address=127.0.0.1 --bind-address=172.16.68.83"
# The port on the local server to listen on.
KUBE_API_PORT="--insecure-port=8080 --secure-port=6443"
# Port minions listen on
#KUBELET_PORT="--kubelet-port=10250"
# Comma separated list of nodes in the etcd cluster
KUBE_ETCD_SERVERS="--etcd-servers=https://172.16.68.83:2379,https://172.16.68.85:2379,https://172.16.68.86:2379"
# Address range to use for services
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
# default admission control policies
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota,NodeRestriction"
# Add your own!
KUBE_API_ARGS="--authorization-mode=RBAC,Node\
--runtime-config=batch/v2alpha1=true \
--anonymous-auth=false \
--kubelet-https=true \
--enable-bootstrap-token-auth \
--token-auth-file=/etc/kubernetes/token.csv \
--service-node-port-range=30000-50000 \
--tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
--tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
--client-ca-file=/etc/kubernetes/ssl/k8s-root-ca.pem \
--service-account-key-file=/etc/kubernetes/ssl/k8s-root-ca.pem \
--etcd-quorum-read=true \
--storage-backend=etcd3 \
--etcd-cafile=/etc/etcd/ssl/etcd-root-ca.pem \
--etcd-certfile=/etc/etcd/ssl/etcd.pem \
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
--enable-swagger-ui=true \
--apiserver-count=3 \
--audit-policy-file=/etc/kubernetes/audit-policy.yaml \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/var/log/kube-audit/audit.log \
--event-ttl=1h"
Node2: /etc/kubernetes/apiserver
###
# kubernetes system config
#
# The following values are used to configure the kube-apiserver
#
# The address on the local server to listen to.
KUBE_API_ADDRESS="--advertise-address=172.16.68.85 --insecure-bind-address=127.0.0.1 --bind-address=172.16.68.85"
# The port on the local server to listen on.
KUBE_API_PORT="--insecure-port=8080 --secure-port=6443"
# Port minions listen on
#KUBELET_PORT="--kubelet-port=10250"
# Comma separated list of nodes in the etcd cluster
KUBE_ETCD_SERVERS="--etcd-servers=https://172.16.68.83:2379,https://172.16.68.85:2379,https://172.16.68.86:2379"
# Address range to use for services
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
# default admission control policies
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota,NodeRestriction"
# Add your own!
KUBE_API_ARGS="--authorization-mode=RBAC,Node\
--runtime-config=batch/v2alpha1=true \
--anonymous-auth=false \
--kubelet-https=true \
--enable-bootstrap-token-auth \
--token-auth-file=/etc/kubernetes/token.csv \
--service-node-port-range=30000-50000 \
--tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
--tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
--client-ca-file=/etc/kubernetes/ssl/k8s-root-ca.pem \
--service-account-key-file=/etc/kubernetes/ssl/k8s-root-ca.pem \
--etcd-quorum-read=true \
--storage-backend=etcd3 \
--etcd-cafile=/etc/etcd/ssl/etcd-root-ca.pem \
--etcd-certfile=/etc/etcd/ssl/etcd.pem \
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
--enable-swagger-ui=true \
--apiserver-count=3 \
--audit-policy-file=/etc/kubernetes/audit-policy.yaml \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/var/log/kube-audit/audit.log \
--event-ttl=1h"
Notes:
KUBE_API_ADDRESS: the addresses the apiserver listens on; HTTP is bound to 127.0.0.1 (not exposed externally), HTTPS to the host NIC address.
--authorization-mode=RBAC,Node: the Node authorizer is added because since 1.8 the system:node role is no longer automatically granted to the system:nodes group.
For the same reason, NodeRestriction was added to --admission-control.
--enable-bootstrap-token-auth: enables token authentication on the apiserver, so kubelets can register using a token.
--token-auth-file=/etc/kubernetes/token.csv: the file recording the token; created below.
--audit-policy-file: points at the advanced auditing configuration.
--runtime-config=batch/v2alpha1=true: enables support for cron-job scheduled tasks.
Create the token file, the kubelet and kube-proxy kubeconfig files, and the audit-policy.yaml file:
## Set environment variables and generate a random token
export KUBE_APISERVER="https://127.0.0.1:6443"
export BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
echo "Token: ${BOOTSTRAP_TOKEN}"
## Create the token file
cat > /etc/kubernetes/token.csv << EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
## Create the kubelet and kube-proxy kubeconfig files
## kubelet kubeconfig
kubectl config set-cluster kubernetes \
--certificate-authority=k8s-root-ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=bootstrap.kubeconfig
kubectl config set-credentials kubelet-bootstrap \
--token=${BOOTSTRAP_TOKEN} \
--kubeconfig=bootstrap.kubeconfig
kubectl config set-context default \
--cluster=kubernetes \
--user=kubelet-bootstrap \
--kubeconfig=bootstrap.kubeconfig
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
## kube-proxy kubeconfig
kubectl config set-cluster kubernetes \
--certificate-authority=k8s-root-ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy \
--client-certificate=kube-proxy.pem \
--client-key=kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig
kubectl config set-context default \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
## Generate the advanced audit policy
cat >> audit-policy.yaml << EOF
# Log all requests at the Metadata level.
apiVersion: audit.k8s.io/v1beta1
kind: Policy
rules:
- level: Metadata
EOF
Distribute the token file, the kubelet and kube-proxy kubeconfig files, and audit-policy.yaml to the corresponding directory on all three masters:
for IP in 83 85 86;do
scp *.kubeconfig /etc/kubernetes/token.csv audit-policy.yaml root@172.16.68.$IP:/etc/kubernetes
ssh root@172.16.68.$IP chown -R kube:kube /etc/kubernetes/ssl
done
Set log directory permissions:
for IP in 83 85 86;do
ssh root@172.16.68.$IP mkdir -p /var/log/kube-audit /usr/libexec/kubernetes
ssh root@172.16.68.$IP chown -R kube:kube /var/log/kube-audit /usr/libexec/kubernetes
ssh root@172.16.68.$IP chmod -R 755 /var/log/kube-audit /usr/libexec/kubernetes
done
kube-controller-manager service unit:
/usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/controller-manager
ExecStart=/usr/local/bin/kube-controller-manager \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBE_MASTER \
$KUBE_CONTROLLER_MANAGER_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
/etc/kubernetes/controller-manager
###
# The following values are used to configure the kubernetes controller-manager
# defaults from config and apiserver should be adequate
# Add your own!
KUBE_CONTROLLER_MANAGER_ARGS="--address=0.0.0.0\
--service-cluster-ip-range=10.254.0.0/16\
--cluster-name=kubernetes \
--cluster-signing-cert-file=/etc/kubernetes/ssl/k8s-root-ca.pem \
--cluster-signing-key-file=/etc/kubernetes/ssl/k8s-root-ca-key.pem \
--service-account-private-key-file=/etc/kubernetes/ssl/k8s-root-ca-key.pem \
--root-ca-file=/etc/kubernetes/ssl/k8s-root-ca.pem \
--leader-elect=true \
--node-monitor-grace-period=40s \
--node-monitor-period=5s \
--pod-eviction-timeout=5m0s"
kube-scheduler service unit:
/usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler Plugin
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/scheduler
ExecStart=/usr/local/bin/kube-scheduler \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBE_MASTER \
$KUBE_SCHEDULER_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
/etc/kubernetes/scheduler
###
# kubernetes scheduler config
# default config should be adequate
# Add your own!
KUBE_SCHEDULER_ARGS="--leader-elect=true--address=0.0.0.0"
Note: every master needs these unit files and config files, and every master must start kube-apiserver, kube-controller-manager and kube-scheduler.
Start the services and check cluster component status:
sudo systemctl daemon-reload
sudo systemctl start kube-apiserver
sudo systemctl start kube-controller-manager
sudo systemctl start kube-scheduler
sudo systemctl enable kube-apiserver
sudo systemctl enable kube-controller-manager
sudo systemctl enable kube-scheduler
sudo kubectl get cs
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-1 Healthy {"health": "true"}
etcd-2 Healthy {"health": "true"}
etcd-0 Healthy {"health": "true"}
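If any component reports Unhealthy, probing the secure port directly helps isolate whether the apiserver itself is up; a sketch using the admin cert pair distributed earlier (a client cert is required since anonymous auth is disabled):
curl --cacert /etc/kubernetes/ssl/k8s-root-ca.pem \
--cert /etc/kubernetes/ssl/admin.pem \
--key /etc/kubernetes/ssl/admin-key.pem \
https://172.16.68.83:6443/healthz
# ok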
The master nodes are now essentially deployed.
Node setup
In this example, masters 83 and 85 double as nodes; read this whole section before deploying anything.
Since in real deployments nodes and masters do not share servers, the standard separated workflow is shown.
A node needs:
Docker
Kubelet
Kube-proxy
kubectl
Remember the tarball from the master deployment? It contains kubelet and kube-proxy; following the earlier procedure, copy them to /usr/local/bin.
/usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service
[Service]
WorkingDirectory=/var/lib/kubelet
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/kubelet
ExecStart=/usr/local/bin/kubelet \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBELET_API_SERVER \
$KUBELET_ADDRESS \
$KUBELET_PORT \
$KUBELET_HOSTNAME \
$KUBE_ALLOW_PRIV \
$KUBELET_POD_INFRA_CONTAINER \
$KUBELET_ARGS
Restart=on-failure
[Install]
WantedBy=multi-user.target
/usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/proxy
ExecStart=/usr/local/bin/kube-proxy \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBE_MASTER \
$KUBE_PROXY_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
## Distribute the Kubernetes root CA (k8s-root-ca.pem):
cd /etc/kubernetes/ssl/
ssh root@172.16.68.87 mkdir /etc/kubernetes/ssl
scp k8s-root-ca.pem root@172.16.68.87:/etc/kubernetes/ssl
## Distribute bootstrap.kubeconfig and kube-proxy.kubeconfig, or regenerate the two files on the node
## Option 1: distribute
$ cd /etc/kubernetes/
$ scp *.kubeconfig root@172.16.68.87:/etc/kubernetes
## Option 2: generate the kubeconfig files on the node
## kubelet kubeconfig
$ # Set cluster parameters
$ kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/ssl/k8s-root-ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=bootstrap.kubeconfig
$ # Set client credentials
$ kubectl config set-credentials kubelet-bootstrap \
--token=${BOOTSTRAP_TOKEN} \
--kubeconfig=bootstrap.kubeconfig
$ # Set context parameters
$ kubectl config set-context default \
--cluster=kubernetes \
--user=kubelet-bootstrap \
--kubeconfig=bootstrap.kubeconfig
$ # Set the default context
$ kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
$ mv bootstrap.kubeconfig /etc/kubernetes/
#### Important: ${BOOTSTRAP_TOKEN} must be the token recorded in the apiserver's token.csv created earlier
## kube-proxy kubeconfig
$ # Set cluster parameters
$ kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/ssl/k8s-root-ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=kube-proxy.kubeconfig
$ # Set client credentials
$ kubectl config set-credentials kube-proxy \
--client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \
--client-key=/etc/kubernetes/ssl/kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig
$ # Set context parameters
$ kubectl config set-context default \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=kube-proxy.kubeconfig
$ # Set the default context
$ kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
$ mv kube-proxy.kubeconfig /etc/kubernetes/
### Set owner and group
$ ssh root@172.16.68.87 chown -R kube:kube /etc/kubernetes/ssl
Edit the common config file
/etc/kubernetes/config
###
# kubernetes system config
#
# The following values are used to configure various aspects of all
# kubernetes services, including
#
# kube-apiserver.service
# kube-controller-manager.service
# kube-scheduler.service
# kubelet.service
# kube-proxy.service
# logging to stderr means we get it in the systemd journal
KUBE_LOGTOSTDERR="--logtostderr=true"
# journal message level, 0 is debug
KUBE_LOG_LEVEL="--v=2"
# Should this cluster be allowed to runprivileged docker containers
KUBE_ALLOW_PRIV="--allow-privileged=true"
# How the controller-manager, scheduler,and proxy find the apiserver
#KUBE_MASTER="--master=http://127.0.0.1:8080"
/etc/kubernetes/kubelet - note: change the address and hostname to the local host's:
###
# kubernetes kubelet (minion) config
# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
KUBELET_ADDRESS="--address=172.16.68.87"
# The port for the info server to serve on
# KUBELET_PORT="--port=10250"
# You may leave this blank to use the actual hostname
KUBELET_HOSTNAME="--hostname-override=cluster4"
# location of the api-server
# KUBELET_API_SERVER=""
# Add your own!
KUBELET_ARGS="--cgroup-driver=cgroupfs\
--cluster-dns=10.254.0.2 \
--resolv-conf=/etc/resolv.conf \
--experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
--fail-swap-on=false \
--cert-dir=/etc/kubernetes/ssl \
--cluster-domain=cluster.local. \
--hairpin-mode=promiscuous-bridge \
--serialize-image-pulls=false \
--pod-infra-container-image=gcr.io/google_containers/pause-amd64:3.0"
The pause-amd64 image is blocked by the GFW, so load it into Docker first.
Copy gcr.io_google_containers_pause-amd64_3.0.tar to the server:
docker load -i gcr.io_google_containers_pause-amd64_3.0.tar
/etc/kubernetes/proxy
###
# kubernetes proxy config
# default config should be adequate
# Add your own!
KUBE_PROXY_ARGS="--bind-address=172.16.68.87 \
--hostname-override=cluster4 \
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \
--cluster-cidr=10.254.0.0/16"
Since the HA scheme is based on an Nginx reverse proxy, every node runs an Nginx instance that load-balances across the masters.
# Create the config directory
mkdir -p /etc/nginx
# Write the proxy config
cat << EOF >> /etc/nginx/nginx.conf
error_log stderr notice;
worker_processes auto;
events {
multi_accept on;
use epoll;
worker_connections 1024;
}
stream {
upstream kube_apiserver {
least_conn;
server 172.16.68.83:6443;
server 172.16.68.85:6443;
server 172.16.68.86:6443;
}
server {
listen 0.0.0.0:6443;
proxy_pass kube_apiserver;
proxy_timeout 10m;
proxy_connect_timeout 1s;
}
}
EOF
# Update permissions
chmod +r /etc/nginx/nginx.conf
nginx-proxy.service:
cat << EOF >> /etc/systemd/system/nginx-proxy.service
[Unit]
Description=kubernetes apiserver docker wrapper
Wants=docker.socket
After=docker.service
[Service]
User=root
PermissionsStartOnly=true
ExecStart=/usr/bin/docker run -p 127.0.0.1:6443:6443 \\
-v /etc/nginx:/etc/nginx \\
--name nginx-proxy \\
--net=host \\
--restart=on-failure:5 \\
--memory=512M \\
nginx:1.13.5-alpine
ExecStartPre=-/usr/bin/docker rm -f nginx-proxy
ExecStop=/usr/bin/docker stop nginx-proxy
Restart=always
RestartSec=15s
TimeoutStartSec=30s
[Install]
WantedBy=multi-user.target
EOF
Finally, start the Nginx proxy:
systemctl daemon-reload
systemctl start nginx-proxy
systemctl enable nginx-proxy
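To verify the proxy path, hit the local 6443; with no client certificate the apiserver answers 401 Unauthorized, which is enough to prove the request was forwarded to a master:
curl -k https://127.0.0.1:6443/healthz
# Unauthorized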
Add nodes
# Run on any master
kubectl create clusterrolebinding kubelet-bootstrap \
--clusterrole=system:node-bootstrapper \
--user=kubelet-bootstrap
Then start kubelet:
systemctl daemon-reload
systemctl start kubelet
systemctl enable kubelet
On any master, check the certificate signing requests:
kubectl get csr
NAME AGE REQUESTOR CONDITION
node-csr-NzOwTOc5VkR7vFQyctMb99iKuUX69ls536k39aJLSog 1m kubelet-bootstrap Pending
Approve it:
kubectl certificate approve node-csr-NzOwTOc5VkR7vFQyctMb99iKuUX69ls536k39aJLSog
certificatesigningrequest"node-csr-NzOwTOc5VkR7vFQyctMb99iKuUX69ls536k39aJLSog" approved
kubectl get csr
NAME AGE REQUESTOR CONDITION
node-csr-NzOwTOc5VkR7vFQyctMb99iKuUX69ls536k39aJLSog 2m kubelet-bootstrap Approved,Issued
kubectl get nodes
NAME STATUS ROLES AGE VERSION
cluster4 Ready <none> 31s v1.8.0
Check the client certificate pair auto-generated on the node (kubelet-client.crt, kubelet-client.key, kubelet.crt, kubelet.key):
ls /etc/kubernetes/ssl/
k8s-root-ca.pem kubelet-client.crt kubelet-client.key kubelet.crt kubelet.key
Finally, start kube-proxy:
systemctl start kube-proxy
systemctl enable kube-proxy
If a node is set up on a master, only bootstrap.kubeconfig and kube-proxy.kubeconfig need changing: point the server at the master's IP (e.g. 172.16.68.83) instead of 127.0.0.1, as sketched below; a master also does not need the Nginx load balancer.
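A sketch of that edit with sed, assuming the kubeconfigs still contain the default 127.0.0.1 server entry:
sed -i 's@server: https://127.0.0.1:6443@server: https://172.16.68.83:6443@' \
/etc/kubernetes/bootstrap.kubeconfig /etc/kubernetes/kube-proxy.kubeconfig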
Check:
kubectl get nodes
NAME STATUS ROLES AGE VERSION
cluster1 Ready <none> 3s v1.8.0
cluster2 Ready <none> 8s v1.8.0
cluster3 Ready <none> 8s v1.8.0
cluster4 Ready <none> 9m v1.8.0
Node deployment is complete.
Deploy the Calico plugin
Overview:
Calico is a pure layer-3 data-center networking solution that integrates seamlessly with IaaS platforms such as OpenStack, providing controllable IP connectivity between VMs, containers and bare metal. Unlike overlay networks such as flannel or libnetwork's overlay drivers, Calico takes a pure L3 approach, replacing virtual switching with virtual routing: each vRouter propagates reachability information (routes) to the rest of the data center over BGP.
On every compute node, Calico uses the Linux kernel to implement an efficient vRouter that handles forwarding; each vRouter announces the routes of its local workloads to the whole Calico network over BGP. Small deployments can be fully meshed; large ones can use designated BGP route reflectors.
Calico can ride directly on the existing data-center fabric (whether L2 or L3), with no extra NAT, tunnels or overlay network.
On top of iptables, Calico also provides rich, flexible network policy: ACLs on each node implement multi-tenant isolation, security groups and other reachability constraints for workloads.
The paragraphs above are quoted; my own takeaway is that Calico gives every Docker container (on the same host or different hosts) a unique IP and lets them communicate with each other.
Fetch the latest calico.yaml:
sudo mkdir ~/calico/
cd ~/calico/
wget https://docs.projectcalico.org/v2.6/getting-started/kubernetes/installation/hosted/calico.yaml
Edit calico.yaml:
# Replace the etcd endpoints
sed -i 's@.*etcd_endpoints:.*@\ \ etcd_endpoints:\ \"https://172.16.68.83:2379,https://172.16.68.85:2379,https://172.16.68.86:2379\"@gi' calico.yaml
# Replace the etcd certificates
export ETCD_CERT=`cat /etc/etcd/ssl/etcd.pem | base64 | tr -d '\n'`
export ETCD_KEY=`cat /etc/etcd/ssl/etcd-key.pem | base64 | tr -d '\n'`
export ETCD_CA=`cat /etc/etcd/ssl/etcd-root-ca.pem | base64 | tr -d '\n'`
sed -i "s@.*etcd-cert:.*@\ \ etcd-cert:\ ${ETCD_CERT}@gi" calico.yaml
sed -i "s@.*etcd-key:.*@\ \ etcd-key:\ ${ETCD_KEY}@gi" calico.yaml
sed -i "s@.*etcd-ca:.*@\ \ etcd-ca:\ ${ETCD_CA}@gi" calico.yaml
sed -i 's@.*etcd_ca:.*@\ \ etcd_ca:\ \"/calico-secrets/etcd-ca\"@gi' calico.yaml
sed -i 's@.*etcd_cert:.*@\ \ etcd_cert:\ \"/calico-secrets/etcd-cert\"@gi' calico.yaml
sed -i 's@.*etcd_key:.*@\ \ etcd_key:\ \"/calico-secrets/etcd-key\"@gi' calico.yaml
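Before applying the manifest, it's worth eyeballing that the substitutions landed; the secret entries should now be long base64 strings rather than the original placeholders:
grep etcd_endpoints calico.yaml
grep -E 'etcd-(ca|cert|key):' calico.yaml | cut -c 1-80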
Alternatively, use the calico.yaml from the file bundle and set etcd_endpoints: "…" to your own IPs.
Update the kubelet config
/etc/kubernetes/kubelet
###
# kubernetes kubelet (minion) config
# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
KUBELET_ADDRESS="--address=172.16.68.83"
# The port for the info server to serve on
# KUBELET_PORT="--port=10250"
# You may leave this blank to use the actual hostname
KUBELET_HOSTNAME="--hostname-override=cluster1"
# location of the api-server
# KUBELET_API_SERVER=""
# Add your own!
KUBELET_ARGS="--cgroup-driver=cgroupfs\
--network-plugin=cni \
--cluster-dns=10.254.0.2 \
--resolv-conf=/etc/resolv.conf \
--experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
--fail-swap-on=false \
--cert-dir=/etc/kubernetes/ssl \
--cluster-domain=cluster.local. \
--hairpin-mode=promiscuous-bridge \
--serialize-image-pulls=false \
--pod-infra-container-image=gcr.io/google_containers/pause-amd64:3.0"
Per the official docs, the kubelet must run with the --network-plugin=cni option, hence the change above.
Every node needs the updated config and a kubelet restart:
systemctl daemon-reload
systemctl restart kubelet
Create the Calico DaemonSet
kubectl apply -f https://docs.projectcalico.org/v2.6/getting-started/kubernetes/installation/rbac.yaml
or use the rbac file from the bundle.
Then create the Calico DaemonSet itself:
kubectl create -f calico.yaml
Check the DaemonSet and its pods:
kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-94b7cb897-krckw 1/1 Running 0 29m
calico-node-5dc8z 2/2 Running 0 29m
calico-node-gm9k8 2/2 Running 0 29m
calico-node-kt5fk 2/2 Running 0 29m
calico-node-xds45 2/2 Running 0 29m
kubectl get ds -n kube-system
NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE
calico-node 4 4 4 4 4 <none> 29m
Restart kubelet and docker:
systemctl restart kubelet
systemctl restart docker
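Once the calico-node pods are up, each host should have learned routes to the other nodes' pod subnets over BGP; a rough check (interface names and subnets will differ in your environment):
ip route | grep bird
# e.g. 192.168.188.0/26 via 172.16.68.85 dev ens160 proto bird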
Test cross-host communication
Create a test deployment:
mkdir ~/demo
cd ~/demo
cat << EOF >> demo.deploy.yml
apiVersion: apps/v1beta2
kind: Deployment
metadata:
name: demo-deployment
spec:
replicas: 4
selector:
matchLabels:
app: demo
template:
metadata:
labels:
app: demo
spec:
containers:
- name: demo
image: mritd/demo
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
EOF
kubectl create -f demo.deploy.yml
Verify connectivity:
kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE
demo-deployment-5fc9c54fb4-5pgfk 1/1 Running 0 2m 192.168.177.65 cluster4
demo-deployment-5fc9c54fb4-5svgl 1/1 Running 0 2m 192.168.33.193 cluster1
demo-deployment-5fc9c54fb4-dfcfd 1/1 Running 0 2m 192.168.188.1 cluster2
demo-deployment-5fc9c54fb4-dttvb 1/1 Running 0 2m 192.168.56.65 cluster3
kubectl exec -ti demo-deployment-5fc9c54fb4-5svgl bash
bash-4.3# ping 192.168.56.66
PING 192.168.56.66 (192.168.56.66): 56 data bytes
64 bytes from 192.168.56.66: seq=0 ttl=62 time=0.407 ms
^C
--- 192.168.56.66 ping statistics ---
1 packets transmitted, 1 packets received, 0% packet loss
round-trip min/avg/max = 0.407/0.407/0.407 ms
The Calico cluster networking is now in place.
Deploy the kube-dns plugin
Overview:
kube-dns assigns DNS names to Kubernetes services so they can be reached by name inside the cluster. Typically kube-dns creates an A record of the form "service-name.namespace.svc.cluster.local" that resolves to the service's cluster IP.
In practice, a service in the default namespace can be reached simply as "service-name"; services in other namespaces are reached as "service-name.namespace".
The above is quoted; my own understanding: Calico gives each pod a unique IP, but those IPs are generated and not known across the cluster in advance, so pods should not address each other by IP. kube-dns maps each service to a name (with subdomains for what sits behind it), so everything can be reached by service name.
Copy the kubedns folder from the file bundle to the server.
Deploy the service
If external access is limited, load the required images first; they are in the images folder:
kubectl create -f kube-dns.yaml
Test kube-dns
Create two sets of pods and services, exec into a pod and curl the other service by name to check that it resolves; external name resolution should also be tested (see the end of this section).
# Create a test deployment
cat > test.deploy.yml << EOF
apiVersion: apps/v1beta2
kind: Deployment
metadata:
name: nginx-deployment
spec:
replicas: 3
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.13.5-alpine
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
EOF
# Create the service for test.deploy
cat > test.service.yml << EOF
kind: Service
apiVersion: v1
metadata:
name: nginx-service
spec:
selector:
app: nginx
ports:
- protocol: TCP
port: 80
targetPort: 80
nodePort: 31000
type: NodePort
EOF
# Create a service for the earlier demo deployment
cat > demo.service.yml << EOF
kind: Service
apiVersion: v1
metadata:
name: demo-service
spec:
selector:
app: demo
ports:
- protocol: TCP
port: 80
targetPort: 80
nodePort: 31001
type: NodePort
EOF
# Create the resources:
kubectl create -f test.deploy.yml
kubectl create -f test.service.yml
kubectl create -f demo.service.yml
Check:
kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE
demo-deployment-5fc9c54fb4-5pgfk 1/1 Running 1 5h 192.168.177.66 node.132
demo-deployment-5fc9c54fb4-5svgl 1/1 Running 1 5h 192.168.33.194 node.131
demo-deployment-5fc9c54fb4-dfcfd 1/1 Running 1 5h 192.168.188.2 node.133
demo-deployment-5fc9c54fb4-dttvb 1/1 Running 1 5h 192.168.56.66 node.134
nginx-deployment-5d56d45798-24ptc 1/1 Running 0 1m 192.168.33.195 node.131
nginx-deployment-5d56d45798-gjr6s 1/1 Running 0 1m 192.168.188.3 node.133
nginx-deployment-5d56d45798-wtfcg 1/1 Running 0 1m 192.168.177.68 node.132
kubectl get service -o wide
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
demo-service NodePort 10.254.23.220 <none> 80:31001/TCP 1m app=demo
kubernetes ClusterIP 10.254.0.1 <none> 443/TCP 22h <none>
nginx-service NodePort 10.254.197.49 <none> 80:31000/TCP 1m app=nginx
# Test DNS resolution from inside a pod
kubectl exec -ti demo-deployment-5fc9c54fb4-5svgl bash
bash-4.3# curl http://nginx-service
<!DOCTYPE html>
<html>
<head>
<title>Welcome tonginx!</title>
<style>
body {
width: 35em;
margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif;
}
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginxweb server is successfully installed and
working. Further configuration isrequired.</p>
<p>For online documentation andsupport please refer to
<ahref="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<ahref="http://nginx.com/">nginx.com</a>.</p>
<p><em>Thank you for using nginx.</em></p>
</body>
</html>