二进制安装Kubernetes 1.20
1、基础环境准备
1.1 服务器环境
最小化安装基础系统,关闭防火墙、SELinux 和 swap,更新软件源、配置时间同步、安装常用命令,重启后验证基础配置
这里使用Ubuntu18.04部署
角色 | 主机名 | IP |
---|---|---|
k8s-master1 | k8s-master1 | 10.10.100.101 |
k8s-master2 | k8s-master2 | 10.10.100.102 |
k8s-master3 | k8s-master3 | 10.10.100.103 |
haproxy1 | haproxy1 | 10.10.100.104 |
haproxy2 | haproxy2 | 10.10.100.105 |
harbor | harbor | 10.10.100.111 |
node1 | k8s-node1 | 10.10.100.107 |
node2 | k8s-node2 | 10.10.100.108 |
node3 | k8s-node3 | 10.10.100.109 |
etcd1 | k8s-etcd1 | 10.10.100.112 |
etcd2 | k8s-etcd2 | 10.10.100.113 |
etcd3 | k8s-etcd3 | 10.10.100.114 |
2、反向代理
2.1 keepalived安装配置
2.1.1 安装
#安装keepalived
root@haproxy1:~# apt-get install keepalived -y
#拷贝配置文件
root@haproxy1:~# cp /usr/share/doc/keepalived/samples/keepalived.conf.vrrp /etc/keepalived/keepalived.conf
2.1.2 配置
MASTER
root@haproxy1:~# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    notification_email {
        acassen
    }
    notification_email_from Alexandre.Cassen@firewall.loc
    smtp_server 192.168.200.1
    smtp_connect_timeout 30
    router_id LVS_DEVEL
    vrrp_garp_interval 0
    vrrp_gna_interval 0
    vrrp_mcast_group4 224.0.0.18
}

vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 80
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    unicast_src_ip 10.10.100.104
    unicast_peer {
        10.10.100.105
    }
    virtual_ipaddress {
        10.10.100.188 dev eth0 label eth0:1
    }
}
BACKUP
root@haproxy2:~# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    notification_email {
        acassen
    }
    notification_email_from Alexandre.Cassen@firewall.loc
    smtp_server 192.168.200.1
    smtp_connect_timeout 30
    router_id LVS_DEVEL
    vrrp_garp_interval 0
    vrrp_gna_interval 0
    vrrp_mcast_group4 224.0.0.18
}

vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 80
    priority 80
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    unicast_src_ip 10.10.100.105
    unicast_peer {
        10.10.100.104
    }
    virtual_ipaddress {
        10.10.100.188 dev eth0 label eth0:1
    }
}
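两台节点配置完成后重启keepalived并验证VIP是否生效(以下为验证示例,网卡名eth0请按实际环境调整):
systemctl restart keepalived && systemctl enable keepalived
ip addr show eth0    #MASTER节点的eth0上应能看到10.10.100.188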
2.2 haproxy安装配置
2.2.1 安装
root@haproxy1:~# apt-get install software-properties-common
root@haproxy1:~# add-apt-repository ppa:vbernat/haproxy-2.0
root@haproxy1:~# apt update
root@haproxy1:~# apt-cache madison haproxy
root@haproxy1:~# apt install haproxy=2.0.28-1ppa1~bionic -y
2.2.2 配置文件(追加到 /etc/haproxy/haproxy.cfg 末尾)
listen stats
    mode http
    bind 0.0.0.0:9999
    stats enable
    log global
    stats uri /haproxy-status
    stats auth haadmin:123456

listen k8s-6443
    bind 10.10.100.188:6443
    mode tcp
    balance roundrobin
    server 10.10.100.101 10.10.100.101:6443 check inter 2s fall 3 rise 5
    server 10.10.100.102 10.10.100.102:6443 check inter 2s fall 3 rise 5
    server 10.10.100.103 10.10.100.103:6443 check inter 2s fall 3 rise 5
2.2.3 修改内核参数
允许绑定非本机IP(两台haproxy节点都需配置,否则BACKUP节点上的haproxy无法绑定VIP)
echo "net.ipv4.ip_nonlocal_bind=1" >> /etc/sysctl.conf
sysctl -p
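配置完成后可检查语法并重启haproxy,验证端口监听(验证示例,配置文件路径以实际为准):
haproxy -f /etc/haproxy/haproxy.cfg -c
systemctl restart haproxy && systemctl enable haproxy
ss -tnlp | grep -E '6443|9999'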
3、安装 harbor
#安装docker
curl -fsSL https://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo apt-key add -
add-apt-repository "deb [arch=amd64] https://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"
apt-get -y update
apt-get -y install docker-ce=5:20.10.10~3-0~ubuntu-bionic
#安装docker-compose
apt install python-pip -y
pip install --upgrade pip
pip install docker-compose
#安装harbor
cd /usr/local/src
wget https://storage.googleapis.com/harbor-releases/release-1.7.0/harbor-offline-installer-v1.7.6.tgz
tar xf harbor-offline-installer-v1.7.6.tgz
ln -sv /usr/local/src/harbor /usr/local/
#修改harbor配置文件
cd /usr/local/harbor
vim harbor.cfg
#修改如下配置
hostname = 10.10.100.111
ui_url_protocol = http
harbor_admin_password = 123456
#执行安装脚本
./install.sh
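安装完成后可验证登录(示例;docker以http方式访问仓库需先把harbor地址加入insecure-registries):
cat > /etc/docker/daemon.json << EOF
{
  "insecure-registries": ["10.10.100.111"]
}
EOF
systemctl restart docker
docker login 10.10.100.111 -u admin -p 123456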
4、二进制部署k8s
4.1 系统初始化
#关闭swap
swapoff -a # 临时关闭
sed -ri 's/.*swap.*/#&/' /etc/fstab # 永久关闭
#根据规划设置主机名
hostnamectl set-hostname <hostname> #设置对应的主机名
# 各节点添加hosts
cat >> /etc/hosts <<EOF
10.10.100.101 k8s-master1
10.10.100.102 k8s-master2
10.10.100.103 k8s-master3
10.10.100.107 k8s-node1
10.10.100.108 k8s-node2
10.10.100.109 k8s-node3
EOF
#修改内核参数
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
net.bridge.bridge-nf-call-ip6tables = 1
EOF
sysctl --system # 生效
#安装ipvsadm
apt install ipvsadm ipset -y
cat >> /etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF
systemctl restart systemd-modules-load.service
cat >> /etc/security/limits.conf <<EOF
* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF
#时间同步
apt install ntpdate -y
ntpdate time1.aliyun.com
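#如需定期同步时间,可追加计划任务(示例)
(crontab -l 2>/dev/null; echo "*/30 * * * * /usr/sbin/ntpdate time1.aliyun.com") | crontab -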
5、部署etcd集群
5.1 准备cfssl 证书生成工具
找任意一台服务器操作即可
#下载cfssl工具
mkdir /opt/cfssl -p && cd /opt/cfssl
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl*
cp cfssl_linux-amd64 /usr/local/bin/cfssl
cp cfssljson_linux-amd64 /usr/local/bin/cfssljson
cp cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
5.2 生成Etcd证书
5.2.1 自签CA证书
#创建工作目录
mkdir -p ~/TLS/{etcd,k8s}
cd ~/TLS/etcd/
#自签CA证书
#证书生成模板
cat > ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "www": {
        "expiry": "87600h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}
EOF
#csr请求模板
cat > ca-csr.json << EOF
{
  "CN": "etcd CA",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "GuangDong",
      "ST": "ShenZhen"
    }
  ]
}
EOF
#生成证书
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
5.2.2 签发ETCD证书
#创建证书申请文件,hosts 填写etcd所有节点ip
cat > server-csr.json << EOF
{
  "CN": "etcd",
  "hosts": [
    "10.10.100.112",
    "10.10.100.113",
    "10.10.100.114"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "GuangDong",
      "ST": "ShenZhen"
    }
  ]
}
EOF
#签发证书
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server
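#签发完成后可用cfssl-certinfo检查证书内容,确认hosts中包含全部etcd节点IP(验证示例)
cfssl-certinfo -cert server.pem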
5.3 下载安装etcd集群
5.3.1 下载etcd二进制文件
cd /opt
wget https://github.com/etcd-io/etcd/releases/download/v3.4.9/etcd-v3.4.9-linux-amd64.tar.gz
5.3.2 配置文件准备
#解压
tar xf etcd-v3.4.9-linux-amd64.tar.gz
#拷贝二进制文件
mkdir /opt/etcd/{bin,cfg,ssl} -p
cp etcd-v3.4.9-linux-amd64/{etcd,etcdctl} /opt/etcd/bin
#创建etcd配置文件
cat > /opt/etcd/cfg/etcd.conf << EOF
#[Member]
ETCD_NAME="etcd-1"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://10.10.100.112:2380"
ETCD_LISTEN_CLIENT_URLS="https://10.10.100.112:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.10.100.112:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://10.10.100.112:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://10.10.100.112:2380,etcd-2=https://10.10.100.113:2380,etcd-3=https://10.10.100.114:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
•ETCD_NAME:节点名称,集群中唯一
•ETCD_DATA_DIR:数据目录
•ETCD_LISTEN_PEER_URLS:集群通信监听地址
•ETCD_LISTEN_CLIENT_URLS:客户端访问监听地址
•ETCD_INITIAL_ADVERTISE_PEER_URLS:集群通告地址
•ETCD_ADVERTISE_CLIENT_URLS:客户端通告地址
•ETCD_INITIAL_CLUSTER:集群节点地址
•ETCD_INITIAL_CLUSTER_TOKEN:集群Token
•ETCD_INITIAL_CLUSTER_STATE:加入集群的当前状态,new是新集群,existing表示加入已有集群
#注意:配置项的值后面不能跟注释,否则etcd启动会报错。
#创建systemd启动文件
cat > /lib/systemd/system/etcd.service << EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=/opt/etcd/cfg/etcd.conf
ExecStart=/opt/etcd/bin/etcd --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --peer-cert-file=/opt/etcd/ssl/server.pem --peer-key-file=/opt/etcd/ssl/server-key.pem --trusted-ca-file=/opt/etcd/ssl/ca.pem --peer-trusted-ca-file=/opt/etcd/ssl/ca.pem --logger=zap
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
5.3.3 拷贝刚生成的证书文件
#将刚刚生成的证书拷贝到etcd节点
scp ./*.pem 10.10.100.112:/opt/etcd/ssl/
5.3.4 将节点1的所有etcd文件拷贝到其他节点
scp -r /opt/etcd 10.10.100.113:/opt/
scp -r /lib/systemd/system/etcd.service 10.10.100.114:/lib/systemd/system/
scp -r /opt/etcd 10.10.100.114:/opt/
scp -r /lib/systemd/system/etcd.service 10.10.100.113:/lib/systemd/system/
5.3.5 修改etcd2、etcd3节点的etcd.conf配置文件
vim /opt/etcd/cfg/etcd.conf
#[Member]
ETCD_NAME="etcd-2" # 修改此处,节点2改为etcd-2,节点3改为etcd-3
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://10.10.100.113:2380" # 修改此处为当前服务器IP
ETCD_LISTEN_CLIENT_URLS="https://10.10.100.113:2379" # 修改此处为当前服务器IP
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.10.100.113:2380" # 修改此处为当前服务器IP
ETCD_ADVERTISE_CLIENT_URLS="https://10.10.100.113:2379" # 修改此处为当前服务器IP
ETCD_INITIAL_CLUSTER="etcd-1=https://10.10.100.112:2380,etcd-2=https://10.10.100.113:2380,etcd-3=https://10.10.100.114:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
5.3.6 启动并验证etcd集群信息
#启动(etcd-1先启动时会阻塞等待其他成员加入,三个节点都启动后集群才会就绪)
systemctl daemon-reload
systemctl start etcd && systemctl enable etcd
#验证etcd集群信息,输出下面信息,说明部署成功
ETCDCTL_API=3 /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://10.10.100.112:2379,https://10.10.100.113:2379,https://10.10.100.114:2379" endpoint health --write-out=table
+----------------------------+--------+-------------+-------+
| ENDPOINT | HEALTH | TOOK | ERROR |
+----------------------------+--------+-------------+-------+
| https://10.10.100.112:2379 | true | 6.255737ms | |
| https://10.10.100.113:2379 | true | 9.044208ms | |
| https://10.10.100.114:2379 | true | 11.886294ms | |
+----------------------------+--------+-------------+-------+
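#也可查看集群成员和leader信息(验证示例)
ETCDCTL_API=3 /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://10.10.100.112:2379,https://10.10.100.113:2379,https://10.10.100.114:2379" member list --write-out=table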
6、 Master 部署
6.1 安装docker
master及node节点都安装docker
curl -fsSL https://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo apt-key add -
add-apt-repository "deb [arch=amd64] https://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"
apt-get -y update
#安装20.10.15版本docker
apt-get -y install docker-ce=5:20.10.15~3-0~ubuntu-bionic docker-ce-cli=5:20.10.15~3-0~ubuntu-bionic
#启动docker
root@docker-node1:~# systemctl start docker
root@docker-node1:~# systemctl enable docker
6.2 kube-apiserver 部署
6.2.1 生成kube-apiserver 证书
6.2.1.1 自签CA证书
cd ~/TLS/k8s/
cat > ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "expiry": "87600h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}
EOF
cat > ca-csr.json << EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "GuangDong",
      "ST": "ShenZhen",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
#生成CA证书
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
6.2.1.2 签发kube-apiserver HTTPS证书
下述文件hosts字段中的IP为所有Master/LB/VIP的IP,一个都不能少!为了方便后期扩容可以多写几个预留的IP。
cat > server-csr.json << EOF
{
  "CN": "kubernetes",
  "hosts": [
    "10.0.0.1",
    "127.0.0.1",
    "10.10.100.101",
    "10.10.100.102",
    "10.10.100.103",
    "10.10.100.188",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "GuangDong",
      "ST": "ShenZhen",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
#生成证书
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
6.2.2 下载k8s二进制文件
下载地址:https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.20.md
只需要下载server包就够了,包含了Master和Worker Node二进制文件
#下载v1.20.13版本
wget https://dl.k8s.io/v1.20.13/kubernetes-server-linux-amd64.tar.gz
##解压
tar xf kubernetes-server-linux-amd64.tar.gz
#拷贝二进制文件
mkdir -p /opt/kubernetes/{bin,cfg,ssl,logs}
cd kubernetes/server/bin/
cp kube-apiserver kube-scheduler kube-controller-manager /opt/kubernetes/bin/
cp kubectl /usr/bin/
6.2.3 部署kube-apiserver
6.2.3.1 创建kube-apiserver配置文件
3个master节点都要配置,注意--bind-address和--advertise-address两个参数需配置成对应节点的IP
cat > /opt/kubernetes/cfg/kube-apiserver.conf << EOF
KUBE_APISERVER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--etcd-servers=https://10.10.100.112:2379,https://10.10.100.113:2379,https://10.10.100.114:2379 \\
--bind-address=10.10.100.101 \\
--secure-port=6443 \\
--advertise-address=10.10.100.101 \\
--allow-privileged=true \\
--service-cluster-ip-range=10.0.0.0/24 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--enable-bootstrap-token-auth=true \\
--token-auth-file=/opt/kubernetes/cfg/token.csv \\
--service-node-port-range=30000-40000 \\
--kubelet-client-certificate=/opt/kubernetes/ssl/server.pem \\
--kubelet-client-key=/opt/kubernetes/ssl/server-key.pem \\
--tls-cert-file=/opt/kubernetes/ssl/server.pem \\
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \\
--client-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--service-account-issuer=api \\
--service-account-signing-key-file=/opt/kubernetes/ssl/server-key.pem \\
--etcd-cafile=/opt/etcd/ssl/ca.pem \\
--etcd-certfile=/opt/etcd/ssl/server.pem \\
--etcd-keyfile=/opt/etcd/ssl/server-key.pem \\
--requestheader-client-ca-file=/opt/kubernetes/ssl/ca.pem \\
--proxy-client-cert-file=/opt/kubernetes/ssl/server.pem \\
--proxy-client-key-file=/opt/kubernetes/ssl/server-key.pem \\
--requestheader-allowed-names=kubernetes \\
--requestheader-extra-headers-prefix=X-Remote-Extra- \\
--requestheader-group-headers=X-Remote-Group \\
--requestheader-username-headers=X-Remote-User \\
--enable-aggregator-routing=true \\
--audit-log-maxage=30 \\
--audit-log-maxbackup=3 \\
--audit-log-maxsize=100 \\
--audit-log-path=/opt/kubernetes/logs/k8s-audit.log"
EOF
#参数详解
--logtostderr:启用日志
--v:日志等级
--log-dir:日志目录
--etcd-servers:etcd集群地址
--bind-address:监听地址
--secure-port:https安全端口
--advertise-address:集群通告地址
--allow-privileged:启用授权
--service-cluster-ip-range:Service虚拟IP地址段
--enable-admission-plugins:准入控制模块
--authorization-mode:认证授权,启用RBAC授权和节点自管理
--enable-bootstrap-token-auth:启用TLS bootstrap机制
--token-auth-file:bootstrap token文件
--service-node-port-range:Service nodeport类型默认分配端口范围
--kubelet-client-xxx:apiserver访问kubelet客户端证书
--tls-xxx-file:apiserver https证书
--service-account-issuer、--service-account-signing-key-file:ServiceAccount token签发配置,1.20版本起必须配置
--etcd-xxxfile:连接Etcd集群证书
--audit-log-xxx:审计日志
启动聚合层相关配置:--requestheader-client-ca-file、--proxy-client-cert-file、--proxy-client-key-file、--requestheader-allowed-names、--requestheader-extra-headers-prefix、--requestheader-group-headers、--requestheader-username-headers、--enable-aggregator-routing
6.2.3.2 拷贝证书
#拷贝etcd证书(先在master节点创建目录)
mkdir -p /opt/etcd/ssl/
scp -r ~/TLS/etcd/*pem 10.10.100.101:/opt/etcd/ssl/
#拷贝k8s证书
scp ~/TLS/k8s/*.pem 10.10.100.101:/opt/kubernetes/ssl/
6.2.3.3 启用TLS Bootstrapping 机制
cat > /opt/kubernetes/cfg/token.csv << EOF
9deea7a5a8b7fa15c500a4dac68209c8,kubelet-bootstrap,10001,"system:node-bootstrapper"
EOF
格式:token,用户名,UID,用户组
token也可自行生成替换:
head -c 16 /dev/urandom | od -An -t x | tr -d ' '
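#示例:生成新token并重写token.csv(注意需与7.1.3中bootstrap.kubeconfig使用的token保持一致)
TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat > /opt/kubernetes/cfg/token.csv << EOF
${TOKEN},kubelet-bootstrap,10001,"system:node-bootstrapper"
EOF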
6.2.3.4 配置启动文件
cat > /lib/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-apiserver.conf
ExecStart=/opt/kubernetes/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
#启动
systemctl daemon-reload
systemctl start kube-apiserver && systemctl enable kube-apiserver
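#验证6443端口已监听(验证示例)
ss -tnlp | grep kube-apiserver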
6.3 部署kube-controller-manager
6.3.1 创建配置文件
所有master节点配置都一样
cat > /opt/kubernetes/cfg/kube-controller-manager.conf << EOF
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--leader-elect=true \\
--kubeconfig=/opt/kubernetes/cfg/kube-controller-manager.kubeconfig \\
--bind-address=127.0.0.1 \\
--allocate-node-cidrs=true \\
--cluster-cidr=10.244.0.0/16 \\
--service-cluster-ip-range=10.0.0.0/24 \\
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--root-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--cluster-signing-duration=87600h0m0s"
EOF
--kubeconfig:连接apiserver配置文件
--leader-elect:当该组件启动多个时,自动选举(HA)
--cluster-signing-cert-file/--cluster-signing-key-file:自动为kubelet颁发证书的CA,与apiserver保持一致
6.3.2 生成kube-controller-manager证书
cd ~/TLS/k8s
cat > kube-controller-manager-csr.json << EOF
{
  "CN": "system:kube-controller-manager",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "GuangDong",
      "ST": "ShenZhen",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
6.3.3 生成kubeconfig文件
#拷贝证书,将证书拷贝到所有master节点
scp kube-controller-manager*.pem 10.10.100.101:/opt/kubernetes/ssl/
scp kube-controller-manager*.pem 10.10.100.102:/opt/kubernetes/ssl/
scp kube-controller-manager*.pem 10.10.100.103:/opt/kubernetes/ssl/
#在所有master节点操作
#需要在/opt/kubernetes/ssl/目录下执行命令或脚本
#KUBE_APISERVER可以配置成集群vip,也可以配置对应master的ip
cd /opt/kubernetes/ssl/
KUBE_CONFIG="/opt/kubernetes/cfg/kube-controller-manager.kubeconfig"
KUBE_APISERVER="https://10.10.100.188:6443"
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-credentials kube-controller-manager \
--client-certificate=./kube-controller-manager.pem \
--client-key=./kube-controller-manager-key.pem \
--embed-certs=true \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-context default \
--cluster=kubernetes \
--user=kube-controller-manager \
--kubeconfig=${KUBE_CONFIG}
kubectl config use-context default --kubeconfig=${KUBE_CONFIG}
6.3.4 配置启动文件
cat > /lib/systemd/system/kube-controller-manager.service << EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-controller-manager.conf
ExecStart=/opt/kubernetes/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
#启动
systemctl daemon-reload
systemctl start kube-controller-manager && systemctl enable kube-controller-manager
6.4 部署kube-scheduler
6.4.1 生成kube-scheduler证书
cd ~/TLS/k8s/
cat > kube-scheduler-csr.json << EOF
{
  "CN": "system:kube-scheduler",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "GuangDong",
      "ST": "ShenZhen",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF
#生成证书
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
#拷贝证书到各master节点
scp /root/TLS/k8s/kube-scheduler*.pem 10.10.100.101:/opt/kubernetes/ssl/
scp /root/TLS/k8s/kube-scheduler*.pem 10.10.100.102:/opt/kubernetes/ssl/
scp /root/TLS/k8s/kube-scheduler*.pem 10.10.100.103:/opt/kubernetes/ssl/
6.4.2 创建配置文件
cat > /opt/kubernetes/cfg/kube-scheduler.conf << EOF
KUBE_SCHEDULER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--leader-elect=true \\
--kubeconfig=/opt/kubernetes/cfg/kube-scheduler.kubeconfig \\
--bind-address=127.0.0.1"
EOF
--kubeconfig:连接apiserver配置文件
--leader-elect:当该组件启动多个时,自动选举(HA)
6.4.3 生成kubeconfig文件
#cd到证书目录下执行
cd /opt/kubernetes/ssl
KUBE_CONFIG="/opt/kubernetes/cfg/kube-scheduler.kubeconfig"
KUBE_APISERVER="https://10.10.100.188:6443"
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-credentials kube-scheduler \
--client-certificate=./kube-scheduler.pem \
--client-key=./kube-scheduler-key.pem \
--embed-certs=true \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-context default \
--cluster=kubernetes \
--user=kube-scheduler \
--kubeconfig=${KUBE_CONFIG}
kubectl config use-context default --kubeconfig=${KUBE_CONFIG}
6.4.4 配置启动文件
cat > /lib/systemd/system/kube-scheduler.service << EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-scheduler.conf
ExecStart=/opt/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
#启动
systemctl daemon-reload
systemctl start kube-scheduler && systemctl enable kube-scheduler
6.5 查看集群状态
6.5.1 生成kubectl连接集群的证书
cd ~/TLS/k8s
cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "GuangDong",
      "ST": "ShenZhen",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF
#生成证书
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
#拷贝证书
scp /root/TLS/k8s/admin*.pem 10.10.100.101:/opt/kubernetes/ssl/
scp /root/TLS/k8s/admin*.pem 10.10.100.102:/opt/kubernetes/ssl/
scp /root/TLS/k8s/admin*.pem 10.10.100.103:/opt/kubernetes/ssl/
6.5.2 生成kubeconfig文件
mkdir /root/.kube
cd /opt/kubernetes/ssl/
KUBE_CONFIG="/root/.kube/config"
KUBE_APISERVER="https://10.10.100.188:6443"
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-credentials cluster-admin \
--client-certificate=./admin.pem \
--client-key=./admin-key.pem \
--embed-certs=true \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-context default \
--cluster=kubernetes \
--user=cluster-admin \
--kubeconfig=${KUBE_CONFIG}
kubectl config use-context default --kubeconfig=${KUBE_CONFIG}
6.5.3 验证查看
kubectl get cs
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-0 Healthy {"health":"true"}
etcd-2 Healthy {"health":"true"}
etcd-1 Healthy {"health":"true"}
6.5.4 授权kubelet-bootstrap用户允许请求证书
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
7、Worker Node部署
以下操作仍在master节点执行
7.1 部署kubelet
7.1.1 创建配置文件
cat > /opt/kubernetes/cfg/kubelet.conf << EOF
KUBELET_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--hostname-override=k8s-master1 \\
--network-plugin=cni \\
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\
--config=/opt/kubernetes/cfg/kubelet-config.yml \\
--cert-dir=/opt/kubernetes/ssl \\
--pod-infra-container-image=lizhenliang/pause-amd64:3.0"
EOF
•--hostname-override:显示名称,集群中唯一
•--network-plugin:启用CNI
•--kubeconfig:空路径,会自动生成,后面用于连接apiserver
•--bootstrap-kubeconfig:首次启动向apiserver申请证书
•--config:配置参数文件
•--cert-dir:kubelet证书生成目录
•--pod-infra-container-image:管理Pod网络容器的镜像
7.1.2 创建参数文件
cat > /opt/kubernetes/cfg/kubelet-config.yml << EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
- 10.0.0.2
clusterDomain: cluster.local
failSwapOn: false
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /opt/kubernetes/ssl/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
maxOpenFiles: 1000000
maxPods: 110
EOF
7.1.3 生成kubelet初次加入集群的引导kubeconfig文件
KUBE_CONFIG="/opt/kubernetes/cfg/bootstrap.kubeconfig"
KUBE_APISERVER="https://10.10.100.188:6443"
TOKEN="9deea7a5a8b7fa15c500a4dac68209c8"
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-credentials "kubelet-bootstrap" \
--token=${TOKEN} \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-context default \
--cluster=kubernetes \
--user="kubelet-bootstrap" \
--kubeconfig=${KUBE_CONFIG}
kubectl config use-context default --kubeconfig=${KUBE_CONFIG}
7.1.4 创建启动文件
cat > /lib/systemd/system/kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet
After=docker.service
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kubelet.conf
ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
#启动
systemctl daemon-reload
systemctl start kubelet && systemctl enable kubelet
7.1.5 批准kubelet证书申请并加入集群
root@k8s-master1:~# kubectl get csr
NAME AGE SIGNERNAME REQUESTOR REQUESTEDDURATION CONDITION
node-csr-bKoDAsJAuVMRzElq3WcabiL2yRTmMEk6QmQ-VR2VYc0 10s kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap <none> Pending
kubectl certificate approve node-csr-bKoDAsJAuVMRzElq3WcabiL2yRTmMEk6QmQ-VR2VYc0
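#如有多个Pending的申请,也可批量批准(示例)
kubectl get csr -o name | xargs kubectl certificate approve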
7.1.6 查看节点信息
root@k8s-master1:~# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master1 NotReady <none> 21m v1.20.13
k8s-master2 NotReady <none> 9m8s v1.20.13
k8s-master3 NotReady <none> 8m49s v1.20.13
7.2 部署kube-proxy
7.2.1 创建配置文件
cat > /opt/kubernetes/cfg/kube-proxy.conf << EOF
KUBE_PROXY_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--config=/opt/kubernetes/cfg/kube-proxy-config.yml"
EOF
7.2.2 创建参数文件
cat > /opt/kubernetes/cfg/kube-proxy-config.yml << EOF
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
metricsBindAddress: 0.0.0.0:10249
clientConnection:
  kubeconfig: /opt/kubernetes/cfg/kube-proxy.kubeconfig
hostnameOverride: k8s-master1
clusterCIDR: 10.244.0.0/16
EOF
#hostnameOverride 需修改为对应节点的主机名
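4.1中已加载ipvs内核模块,如希望kube-proxy使用ipvs模式(默认为iptables),可在参数文件中追加如下配置(示例):
cat >> /opt/kubernetes/cfg/kube-proxy-config.yml << EOF
mode: ipvs
ipvs:
  scheduler: rr
EOF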
7.2.3 生成证书
cd ~/TLS/k8s
cat > kube-proxy-csr.json << EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "GuangDong",
      "ST": "ShenZhen",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
#生成证书
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
#拷贝证书文件
scp kube-proxy*.pem 10.10.100.101:/opt/kubernetes/ssl/
scp kube-proxy*.pem 10.10.100.102:/opt/kubernetes/ssl/
scp kube-proxy*.pem 10.10.100.103:/opt/kubernetes/ssl/
7.2.4 生成kube-proxy.kubeconfig文件
cd /opt/kubernetes/ssl/
KUBE_CONFIG="/opt/kubernetes/cfg/kube-proxy.kubeconfig"
KUBE_APISERVER="https://10.10.100.188:6443"
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-credentials kube-proxy \
--client-certificate=./kube-proxy.pem \
--client-key=./kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-context default \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=${KUBE_CONFIG}
kubectl config use-context default --kubeconfig=${KUBE_CONFIG}
7.2.5 创建启动文件
cat > /lib/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Proxy
After=network.target
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-proxy.conf
ExecStart=/opt/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
#启动
systemctl daemon-reload
systemctl start kube-proxy && systemctl enable kube-proxy
7.3 部署calico网络组件
以下步骤只在k8s-master1节点操作
Calico是一个纯三层的数据中心网络方案,是目前Kubernetes主流的网络方案。
#下载yaml文件
wget https://docs.projectcalico.org/manifests/calico.yaml
#启用
kubectl apply -f calico.yaml
kubectl get pods -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-kube-controllers-7c968d79b6-4wvxs 1/1 Running 0 7m58s
kube-system calico-node-55ndv 1/1 Running 0 7m58s
kube-system calico-node-mmcmk 1/1 Running 0 7m58s
kube-system calico-node-rm7lc 1/1 Running 3 7m58s
root@k8s-master1:~# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master1 Ready <none> 45m v1.20.13
k8s-master2 Ready <none> 33m v1.20.13
k8s-master3 Ready <none> 33m v1.20.13
7.4 或者部署Flannel网络组件
7.4.1 部署CNI网络
CNI二进制包下载地址:
https://github.com/containernetworking/plugins/releases
v1.0.1下载链接:
https://github.com/containernetworking/plugins/releases/download/v1.0.1/cni-plugins-linux-amd64-v1.0.1.tgz
7.4.2 解压并安装CNI
#下载CNI二进制安装包
mkdir -p /opt/k8s/package && cd /opt/k8s/package
wget https://github.com/containernetworking/plugins/releases/download/v1.0.1/cni-plugins-linux-amd64-v1.0.1.tgz
#创建配置目录
mkdir /opt/cni/bin /etc/cni/net.d -p
tar xf cni-plugins-linux-amd64-v1.0.1.tgz -C /opt/cni/bin
#将CNI配置上传到每个Node
scp -r /opt/cni/ 10.10.100.102:/opt/
scp -r /opt/cni/ 10.10.100.103:/opt/
scp -r /opt/cni/ 10.10.100.107:/opt/
scp -r /opt/cni/ 10.10.100.108:/opt/
scp -r /opt/cni/ 10.10.100.109:/opt/
#确保kubelet启用CNI
cat /opt/kubernetes/cfg/kubelet.conf
--network-plugin=cni
7.4.3 安装flannel网络插件
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl apply -f kube-flannel.yml
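#验证flannel pod状态(验证示例)
kubectl get pods -A | grep flannel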
7.5 授权apiserver访问kubelet
应用场景:例如kubectl logs
cat > apiserver-to-kubelet-rbac.yaml << EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
- apiGroups:
  - ""
  resources:
  - nodes/proxy
  - nodes/stats
  - nodes/log
  - nodes/spec
  - nodes/metrics
  - pods/log
  verbs:
  - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: kubernetes
EOF
kubectl apply -f apiserver-to-kubelet-rbac.yaml
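#验证(示例,假设已按7.3部署calico):授权生效后kubectl logs可正常查看pod日志
kubectl -n kube-system logs deploy/calico-kube-controllers --tail=5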
7.6 添加其他node节点
7.6.1 创建工作目录并拷贝二进制文件及配置文件
从master节点拷贝到所有node节点
#先拷贝k8s工作目录(包含配置文件及证书,同时在node上创建出目录结构)
scp -r /opt/kubernetes root@10.10.100.107:/opt/
scp -r /opt/kubernetes root@10.10.100.108:/opt/
scp -r /opt/kubernetes root@10.10.100.109:/opt/
#再拷贝kubelet、kube-proxy二进制文件
cd ~/kubernetes/server/bin/
scp kubelet kube-proxy root@10.10.100.107:/opt/kubernetes/bin
scp kubelet kube-proxy root@10.10.100.108:/opt/kubernetes/bin
scp kubelet kube-proxy root@10.10.100.109:/opt/kubernetes/bin
#拷贝启动文件
scp /lib/systemd/system/{kubelet,kube-proxy}.service root@10.10.100.107:/lib/systemd/system
scp /lib/systemd/system/{kubelet,kube-proxy}.service root@10.10.100.108:/lib/systemd/system
scp /lib/systemd/system/{kubelet,kube-proxy}.service root@10.10.100.109:/lib/systemd/system
7.6.2 删除对应文件
在刚拷贝的node节点操作
rm -f /opt/kubernetes/cfg/kubelet.kubeconfig
rm -f /opt/kubernetes/ssl/kubelet*
#注:这几个文件是证书申请审批后自动生成的,每个Node不同,必须删除
7.6.3 生成kubectl连接集群的证书
需要安装cfssl,安装参考5.1
在node1节点操作
cd /opt/kubernetes/ssl
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "expiry": "87600h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}
EOF
cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "GuangDong",
      "ST": "ShenZhen",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
#拷贝证书到其他节点
scp /opt/kubernetes/ssl/admin*.pem 10.10.100.108:/opt/kubernetes/ssl/
scp /opt/kubernetes/ssl/admin*.pem 10.10.100.109:/opt/kubernetes/ssl/
7.6.4 生成kubeconfig文件
mkdir /root/.kube
KUBE_CONFIG="/root/.kube/config"
KUBE_APISERVER="https://10.10.100.188:6443"
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-credentials cluster-admin \
--client-certificate=./admin.pem \
--client-key=./admin-key.pem \
--embed-certs=true \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-context default \
--cluster=kubernetes \
--user=cluster-admin \
--kubeconfig=${KUBE_CONFIG}
kubectl config use-context default --kubeconfig=${KUBE_CONFIG}
#查看集群状态
kubectl get cs
NAME STATUS MESSAGE ERROR
controller-manager Healthy ok
scheduler Healthy ok
etcd-0 Healthy {"health":"true"}
etcd-2 Healthy {"health":"true"}
etcd-1 Healthy {"health":"true"}
7.6.5 修改配置文件主机名
vim /opt/kubernetes/cfg/kubelet.conf
--hostname-override=k8s-node1
vim /opt/kubernetes/cfg/kube-proxy-config.yml
hostnameOverride: k8s-node1
#修改成对应node节点的主机名
7.6.6 启动kubelet kube-proxy
systemctl daemon-reload
systemctl start kubelet kube-proxy && systemctl enable kubelet kube-proxy
7.6.7 在Master上批准新Node kubelet证书申请
#查看请求
kubectl get csr
NAME AGE SIGNERNAME REQUESTOR CONDITION
node-csr-LxU_i-sMmkHdLTBp85yTRHGhvrdAmWhQeXXn-flh7aU 73m kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Pending
node-csr-Tr4RBOFVI_OsLzpyzFVVVNFjKfUoTGzTWBBqIYWiBY8 26s kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Pending
node-csr-aQ1l1Uz99tJXcg_Fu-by290QCuNg5MNGwnz9dO0SnpE 38m kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Pending
#批准请求
kubectl certificate approve node-csr-Tr4RBOFVI_OsLzpyzFVVVNFjKfUoTGzTWBBqIYWiBY8
kubectl certificate approve node-csr-LxU_i-sMmkHdLTBp85yTRHGhvrdAmWhQeXXn-flh7aU
kubectl certificate approve node-csr-aQ1l1Uz99tJXcg_Fu-by290QCuNg5MNGwnz9dO0SnpE
7.7 查看节点状态
root@k8s-master1:~# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master1 Ready <none> 2d15h v1.20.13
k8s-master2 Ready <none> 2d15h v1.20.13
k8s-master3 Ready <none> 2d15h v1.20.13
k8s-node1 Ready <none> 12m v1.20.13
k8s-node2 Ready <none> 8m50s v1.20.13
k8s-node3 Ready <none> 77s v1.20.13
8、部署Dashboard
8.1 下载部署
#下载
mkdir -p /opt/kubernetes/dashboard && cd /opt/kubernetes/dashboard
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.4.0/aio/deploy/recommended.yaml
#启动
mv recommended.yaml dashboard.yaml
kubectl apply -f dashboard.yaml
#将ClusterIP修改为NodePort
kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard
type: ClusterIP #修改为NodePort
kubectl edit svc dashboard-metrics-scraper -n kubernetes-dashboard
type: ClusterIP #修改为NodePort
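#也可以直接用patch修改Service类型(与edit等效的示例)
kubectl -n kubernetes-dashboard patch svc kubernetes-dashboard -p '{"spec":{"type":"NodePort"}}'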
#创建cluster-admin管理员集群角色:
cat > dashboard-adminuser.yaml <<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
EOF
#启动
kubectl apply -f dashboard-adminuser.yaml
# 获取用户Token
kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep admin-user | awk '{print $1}')
8.2 访问
访问地址:https://NodeIP:NodePort(具体端口可通过 kubectl -n kubernetes-dashboard get svc kubernetes-dashboard 查看)
9、部署CoreDNS
9.1 coredns.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            upstream
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        proxy . /etc/resolv.conf
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      serviceAccountName: coredns
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      containers:
      - name: coredns
        image: lizhenliang/coredns:1.2.2
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
      - name: config-volume
        configMap:
          name: coredns
          items:
          - key: Corefile
            path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.0.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
9.2 启动
kubectl apply -f coredns.yaml
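#验证DNS解析(示例,busybox:1.28中的nslookup可正常工作)
kubectl run -it --rm dns-test --image=busybox:1.28 -- nslookup kubernetes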