
Installing a Highly Available Kubernetes Cluster from Binaries


OS: CentOS 7.6

Kubernetes version: v1.13.4

Docker version: 18.06

k8s-vip  192.168.2.240
k8s-m1   192.168.2.241  etcd kubectl apiserver scheduler controller-manager kubelet kube-proxy haproxy keepalived
k8s-m2   192.168.2.242  etcd kubectl apiserver scheduler controller-manager kubelet kube-proxy haproxy keepalived
k8s-m3   192.168.2.243  etcd kubectl apiserver scheduler controller-manager kubelet kube-proxy haproxy keepalived
k8s-n1   192.168.2.244  kubelet kube-proxy
k8s-n2   192.168.2.245  kubelet kube-proxy
k8s-n3   192.168.2.246  kubelet kube-proxy


#Configure /etc/hosts on every node
cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.2.241 k8s-m1
192.168.2.242 k8s-m2
192.168.2.243 k8s-m3
192.168.2.244 k8s-n1
192.168.2.245 k8s-n2
192.168.2.246 k8s-n3

To make the following steps easier, set up passwordless SSH login from k8s-m1 to every node.
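The key setup itself is not shown in the original; a minimal sketch (assuming password-based root SSH works at this point):

#Generate a key pair on k8s-m1 and push it to every node
mkdir -p /root/.ssh && ssh-keygen -t rsa -b 2048 -N "" -f /root/.ssh/id_rsa
for node in k8s-{m,n}{1,2,3};do ssh-copy-id root@$node;done
#Distribute /etc/hosts so every node resolves the cluster hostnames
for node in k8s-{m,n}{1,2,3};do scp /etc/hosts root@$node:/etc/hosts;done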

#Subsequent operations are performed on k8s-m1
cat /root/init_env.sh
#!/bin/bash
#Disable the firewall and SELinux
systemctl stop firewalld && systemctl disable firewalld
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config
#Disable swap
swapoff -a && sysctl -w vm.swappiness=0
sed -i 's/.*swap.*/#&/g' /etc/fstab
#Set kernel parameters required by Docker and Kubernetes
cat > /etc/sysctl.d/k8s.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
modprobe br_netfilter
sysctl -p /etc/sysctl.d/k8s.conf
#Load the IPVS kernel modules
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
#Install Docker 18.06
yum -y install yum-utils device-mapper-persistent-data lvm2 wget epel-release ipvsadm vim ntpdate
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum install -y docker-ce-18.06.1.ce-3.el7
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": ["https://gco4rcsp.mirror.aliyuncs.com"],
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m",
    "max-file": "3"
  }
}
EOF
systemctl enable docker && systemctl daemon-reload && systemctl restart docker


#Copy the init script to every node and run it
chmod +x /root/init_env.sh
for node in k8s-{m,n}{1,2,3};do scp /root/init_env.sh root@$node:/root/;done
for node in k8s-{m,n}{1,2,3};do ssh root@$node /root/init_env.sh >> /dev/null;echo -e "$node install complete";done


#Cron job for time synchronization on all nodes
0 * * * * ntpdate 202.112.10.36
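The post doesn't show how this entry is installed; one possibility is appending it to root's crontab on every node (the spool path assumes CentOS):

for node in k8s-{m,n}{1,2,3};do ssh root@$node 'echo "0 * * * * ntpdate 202.112.10.36" >> /var/spool/cron/root';done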


#Set environment variables
export SOFT_DIR=/root/k8s_soft
export ETCD_version=v3.3.12
export ETCD_SSL_DIR=/etc/etcd/ssl
export K8S_DIR=/etc/kubernetes
export APISERVER_IP=192.168.2.240
export KUBE_APISERVER=https://192.168.2.240:8443
export SYSTEM_SERVICE_DIR=/usr/lib/systemd/system


#Download the Kubernetes binaries and copy them to the appropriate nodes
mkdir -p ${SOFT_DIR} && cd ${SOFT_DIR} && wget  https://storage.googleapis.com/kubernetes-release/release/v1.13.4/kubernetes-server-linux-amd64.tar.gz && tar -zxvf kubernetes-server-linux-amd64.tar.gz && cd kubernetes/server/bin/
for node in k8s-m{1,2,3};do scp kubectl kube-apiserver kube-scheduler kube-controller-manager kubelet kube-proxy root@$node:/usr/local/bin/;done
for node in k8s-n{1,2,3};do scp kubelet kube-proxy root@$node:/usr/local/bin/;done


#Download cfssl
cd ${SOFT_DIR} && wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -O /usr/local/bin/cfssl
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -O /usr/local/bin/cfssljson
chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson


#CNI plugins
export CNI_URL="https://github.com/containernetworking/plugins/releases/download"
export CNI_VERSION=v0.7.4
cd ${SOFT_DIR} && wget  "${CNI_URL}/${CNI_VERSION}/cni-plugins-amd64-${CNI_VERSION}.tgz"
mkdir -p /opt/cni/bin
tar -zxf cni-plugins-amd64-${CNI_VERSION}.tgz -C /opt/cni/bin
#Copy the binaries to all other nodes
for node in k8s-m{2,3} k8s-n{1,2,3};do ssh root@$node mkdir -p /opt/cni/bin/;scp /opt/cni/bin/* root@$node:/opt/cni/bin;done


#Download etcd
cd ${SOFT_DIR} && wget https://github.com/etcd-io/etcd/releases/download/${ETCD_version}/etcd-${ETCD_version}-linux-amd64.tar.gz
tar -zxvf etcd-${ETCD_version}-linux-amd64.tar.gz && cd etcd-${ETCD_version}-linux-amd64
#Copy the binaries to the master nodes
for node in k8s-m{1,2,3};do scp etcd* root@$node:/usr/local/bin/;done


#Generate the etcd CA and the etcd certificates
mkdir -p ${ETCD_SSL_DIR} && cd ${ETCD_SSL_DIR}
cat > ca-config.json <<EOF
{"signing":{"default":{"expiry":"87600h"},"profiles":{"kubernetes":{"usages":["signing","key encipherment","server auth","client auth"],"expiry":"87600h"}}}}
EOF
cat > etcd-ca-csr.json <<EOF 
{"CN":"etcd","key":{"algo":"rsa","size":2048},"names":[{"C":"CN","ST":"BeiJing","L":"BeiJing","O":"etcd","OU":"etcd"}]}
EOF
cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare etcd-ca
cfssl gencert -ca=etcd-ca.pem -ca-key=etcd-ca-key.pem -config=ca-config.json -hostname=127.0.0.1,192.168.2.241,192.168.2.242,192.168.2.243 -profile=kubernetes etcd-ca-csr.json | cfssljson -bare etcd
rm -rf *.json *.csr
for node in k8s-m{2,3};do ssh root@$node mkdir -p ${ETCD_SSL_DIR} /var/lib/etcd;scp * root@$node:${ETCD_SSL_DIR};done


#etcd configuration file (shown for etcd01 on k8s-m1)
cat /etc/etcd/config 
#[Member]
ETCD_NAME="etcd01"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.2.241:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.2.241:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.2.241:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.2.241:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://192.168.2.241:2380,etcd02=https://192.168.2.242:2380,etcd03=https://192.168.2.243:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
cat ${SYSTEM_SERVICE_DIR}/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=/etc/etcd/config
ExecStart=/usr/local/bin/etcd \
--name=${ETCD_NAME} \
--data-dir=${ETCD_DATA_DIR} \
--listen-peer-urls=${ETCD_LISTEN_PEER_URLS} \
--listen-client-urls=${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
--advertise-client-urls=${ETCD_ADVERTISE_CLIENT_URLS} \
--initial-advertise-peer-urls=${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
--initial-cluster=${ETCD_INITIAL_CLUSTER} \
--initial-cluster-token=${ETCD_INITIAL_CLUSTER_TOKEN} \
--initial-cluster-state=new \
--cert-file=/etc/etcd/ssl/etcd.pem \
--key-file=/etc/etcd/ssl/etcd-key.pem \
--peer-cert-file=/etc/etcd/ssl/etcd.pem \
--peer-key-file=/etc/etcd/ssl/etcd-key.pem \
--trusted-ca-file=/etc/etcd/ssl/etcd-ca.pem \
--peer-trusted-ca-file=/etc/etcd/ssl/etcd-ca.pem
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target


#Copy the config and unit files to the other master nodes
for node in k8s-m{2,3};do scp /etc/etcd/config root@$node:/etc/etcd/;scp ${SYSTEM_SERVICE_DIR}/etcd.service root@$node:${SYSTEM_SERVICE_DIR};done


#Edit the config on each master (ETCD_NAME and the listen/advertise IPs; a sample edit follows), then start etcd on all three masters
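The per-node edit is omitted in the original; a hypothetical sed for k8s-m2 (k8s-m3 is analogous with etcd03 / 192.168.2.243) that leaves ETCD_INITIAL_CLUSTER untouched:

#Run on k8s-m2 before starting etcd
sed -i -e '/^ETCD_INITIAL_CLUSTER=/!s/192.168.2.241/192.168.2.242/g' \
       -e 's/^ETCD_NAME="etcd01"/ETCD_NAME="etcd02"/' /etc/etcd/config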
systemctl enable --now etcd


#Check etcd cluster health
etcdctl \
--ca-file=/etc/etcd/ssl/etcd-ca.pem \
--cert-file=/etc/etcd/ssl/etcd.pem \
--key-file=/etc/etcd/ssl/etcd-key.pem \
--endpoints="https://192.168.2.241:2379,\
https://192.168.2.242:2379,\
https://192.168.2.243:2379" cluster-health


Certificates and kubeconfig files for each component

Run the following steps on k8s-m1.

#Cluster CA
mkdir -p ${K8S_DIR}/pki && cd ${K8S_DIR}/pki
cat > ca-config.json << EOF
{"signing":{"default":{"expiry":"87600h"},"profiles":{"kubernetes":{"usages":["signing","key encipherment","server auth","client auth"],"expiry":"87600h"}}}}
EOF
cat > ca-csr.json << EOF 
{"CN": "kubernetes","key": {"algo": "rsa","size": 2048},"names":[{"C": "CN","ST": "BeiJing","L": "BeiJing","O": "kubernetes","OU": "k8s"}]}
EOF
cfssl gencert -initca ca-csr.json | cfssljson -bare ca


#kube-apiserver certificate
cat > apiserver-csr.json <<EOF 
{"CN":"apiserver","key":{"algo":"rsa","size":2048},"names":[{"C":"CN","ST":"BeiJing","L":"BeiJing","O":"kubernetes","OU":"k8s"}]}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -hostname=10.96.0.1,192.168.2.240,127.0.0.1,kubernetes.default -profile=kubernetes apiserver-csr.json | cfssljson -bare apiserver


#Client certificate used by the apiserver for HTTPS connections to kubelets

cat > api-kubelet-client-csr.json << EOF
{"CN":"apiserver-kubelet-client","key":{"algo":"rsa","size":2048},"names":[{"C":"CN","ST":"BeiJing","L":"BeiJing","O":"system:masters","OU":"k8s"}]}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes api-kubelet-client-csr.json | cfssljson -bare api-kubelet-client


#controller-manager certificate and kubeconfig
cat > manager-csr.json << EOF
{"CN":"system:kube-controller-manager","key":{"algo":"rsa","size":2048},"names":[{"C":"CN","ST":"BeiJing","L":"BeiJing","O":"system:kube-controller-manager","OU":"k8s"}]}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -hostname=127.0.0.1,192.168.2.241,192.168.2.242,192.168.2.243 -profile=kubernetes   manager-csr.json | cfssljson -bare controller-manager
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=${KUBE_APISERVER} --kubeconfig=${K8S_DIR}/controller-manager.kubeconfig
kubectl config set-credentials system:kube-controller-manager --client-certificate=controller-manager.pem --client-key=controller-manager-key.pem --embed-certs=true --kubeconfig=${K8S_DIR}/controller-manager.kubeconfig
kubectl config set-context system:kube-controller-manager@kubernetes --cluster=kubernetes --user=system:kube-controller-manager --kubeconfig=${K8S_DIR}/controller-manager.kubeconfig
kubectl config use-context system:kube-controller-manager@kubernetes --kubeconfig=${K8S_DIR}/controller-manager.kubeconfig


#scheduler certificate and kubeconfig
cat > scheduler-csr.json << EOF
{"CN":"system:kube-scheduler","key":{"algo":"rsa","size":2048},"names":[{"C":"CN","ST":"BeiJing","L":"BeiJing","O":"system:kube-scheduler","OU":"k8s"}]}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -hostname=127.0.0.1,192.168.2.241,192.168.2.242,192.168.2.243 -profile=kubernetes   scheduler-csr.json | cfssljson -bare scheduler
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=${KUBE_APISERVER} --kubeconfig=${K8S_DIR}/scheduler.kubeconfig
kubectl config set-credentials system:kube-scheduler --client-certificate=scheduler.pem --client-key=scheduler-key.pem --embed-certs=true --kubeconfig=${K8S_DIR}/scheduler.kubeconfig
kubectl config set-context system:kube-scheduler@kubernetes --cluster=kubernetes --user=system:kube-scheduler --kubeconfig=${K8S_DIR}/scheduler.kubeconfig
kubectl config use-context system:kube-scheduler@kubernetes --kubeconfig=${K8S_DIR}/scheduler.kubeconfig


#admin certificate (used by kubectl) and kubeconfig
cat > admin-csr.json << EOF
{"CN":"admin","key":{"algo":"rsa","size":2048},"names":[{"C":"CN","ST":"BeiJing","L":"BeiJing","O":"system:masters","OU":"k8s"}]}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=${KUBE_APISERVER} --kubeconfig=${K8S_DIR}/admin.kubeconfig
kubectl config set-credentials kubernetes-admin --client-certificate=admin.pem --client-key=admin-key.pem --embed-certs=true --kubeconfig=${K8S_DIR}/admin.kubeconfig
kubectl config set-context kubernetes-admin@kubernetes --cluster=kubernetes --user=kubernetes-admin --kubeconfig=${K8S_DIR}/admin.kubeconfig
kubectl config use-context kubernetes-admin@kubernetes --kubeconfig=${K8S_DIR}/admin.kubeconfig


#Service account key pair
#kube-apiserver verifies ServiceAccount tokens with the public key (--service-account-key-file), and kube-controller-manager signs them with the private key (--service-account-private-key-file); the two files must be a matching pair
openssl genrsa -out sa.key 2048
openssl rsa -in sa.key -pubout -out sa.pub


#Delete the certificate request files
rm -rf *.json *.csr


Service configuration files

#HA deployment for the apiserver: HAProxy + Keepalived
for node in k8s-m{1,2,3};do ssh root@$node yum -y install haproxy keepalived;done
#Keepalived config. On the other masters change state to BACKUP and set priority lower than the master's; check the NIC name (interface) and adjust it if needed
cat > /etc/keepalived/keepalived.conf << EOF 
vrrp_script check_haproxy {
    script "/etc/keepalived/check_haproxy.sh"
    interval 3
}
vrrp_instance VI_1 {
    state MASTER
    interface ens192
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
    ${APISERVER_IP}
    }
     track_script {
        check_haproxy
     }
}
EOF
cat > /etc/keepalived/check_haproxy.sh <<EOF
#!/bin/bash
systemctl status haproxy > /dev/null
if [[ \$? != 0 ]];then
        echo "haproxy is down,close the keepalived"
        systemctl stop keepalived
fi
EOF
chmod +x /etc/keepalived/check_haproxy.sh
cat > /etc/haproxy/haproxy.cfg << EOF 
global
    log         127.0.0.1 local2
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon
    stats socket /var/lib/haproxy/stats
#---------------------------------------------------------------------
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000
#---------------------------------------------------------------------
frontend  k8s-api 
   bind *:8443
   mode tcp
   default_backend             apiserver
#---------------------------------------------------------------------
backend apiserver
    balance     roundrobin
    mode tcp
    server  k8s-m1 192.168.2.241:6443 weight 1 maxconn 2000 check inter 2000 rise 2 fall 3
    server  k8s-m2 192.168.2.242:6443 weight 1 maxconn 2000 check inter 2000 rise 2 fall 3
    server  k8s-m3 192.168.2.243:6443 weight 1 maxconn 2000 check inter 2000 rise 2 fall 3
EOF


#Copy the config files to the other master nodes
for node in k8s-m{2,3};do scp /etc/keepalived/* root@$node:/etc/keepalived;scp /etc/haproxy/haproxy.cfg root@$node:/etc/haproxy;done


#Edit keepalived.conf on the backup masters, then start the services
#(the edit itself is omitted in the original; a sample one-liner follows)
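A hypothetical edit for k8s-m2 (on k8s-m3 use a still lower priority, e.g. 80):

ssh root@k8s-m2 "sed -i 's/state MASTER/state BACKUP/;s/priority 100/priority 90/' /etc/keepalived/keepalived.conf"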
for node in k8s-m{1,2,3};do ssh root@$node systemctl enable --now keepalived haproxy;done
#Check that the VIP is working
ping ${APISERVER_IP} -c 3


#kube-apiserver systemd unit
cat > ${SYSTEM_SERVICE_DIR}/kube-apiserver.service << EOF 
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-apiserver \
  --authorization-mode=Node,RBAC \
  --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeClaimResize,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,Priority,PodPreset \
  --advertise-address=192.168.2.241 \
  --bind-address=192.168.2.241  \
  --insecure-port=0 \
  --secure-port=6443 \
  --allow-privileged=true \
  --apiserver-count=3 \
  --audit-log-maxage=30 \
  --audit-log-maxbackup=3 \
  --audit-log-maxsize=100 \
  --audit-log-path=/var/log/audit.log \
  --enable-swagger-ui=true \
  --storage-backend=etcd3 \
  --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \
  --etcd-certfile=/etc/etcd/ssl/etcd.pem \
  --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
  --etcd-servers=https://192.168.2.241:2379,https://192.168.2.242:2379,https://192.168.2.243:2379 \
  --event-ttl=1h \
  --enable-bootstrap-token-auth \
  --client-ca-file=/etc/kubernetes/pki/ca.pem \
  --kubelet-https=true \
  --kubelet-client-certificate=/etc/kubernetes/pki/api-kubelet-client.pem \
  --kubelet-client-key=/etc/kubernetes/pki/api-kubelet-client-key.pem \
  --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
  --runtime-config=api/all,settings.k8s.io/v1alpha1=true \
  --service-cluster-ip-range=10.96.0.0/12 \
  --service-node-port-range=30000-32767 \
  --service-account-key-file=/etc/kubernetes/pki/sa.pub \
  --tls-cert-file=/etc/kubernetes/pki/apiserver.pem \
  --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \
  --feature-gates=PodShareProcessNamespace=true \
  --v=4
Restart=on-failure
RestartSec=10s
LimitNOFILE=65535
[Install]
WantedBy=multi-user.target
EOF


#kube-scheduler systemd unit
cat > ${SYSTEM_SERVICE_DIR}/kube-scheduler.service <<EOF 
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-scheduler \
  --leader-elect=true \
  --kubeconfig=/etc/kubernetes/scheduler.kubeconfig \
  --address=127.0.0.1 \
  --v=2
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
EOF


#kube-controller-manager systemd unit
cat > ${SYSTEM_SERVICE_DIR}/kube-controller-manager.service << EOF 
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-controller-manager \
  --allocate-node-cidrs=true \
  --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \
  --authentication-kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \
  --authorization-kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \
  --client-ca-file=/etc/kubernetes/pki/ca.pem \
  --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \
  --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \
  --bind-address=127.0.0.1 \
  --leader-elect=true \
  --cluster-cidr=10.244.0.0/16 \
  --service-cluster-ip-range=10.96.0.0/12 \
  --service-account-private-key-file=/etc/kubernetes/pki/sa.key \
  --root-ca-file=/etc/kubernetes/pki/ca.pem \
  --use-service-account-credentials=true \
  --controllers=*,bootstrapsigner,tokencleaner \
  --experimental-cluster-signing-duration=86700h \
  --feature-gates=RotateKubeletClientCertificate=true \
  --v=2
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
EOF


#kube-proxy systemd unit
cat > ${SYSTEM_SERVICE_DIR}/kube-proxy.service << EOF 
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-proxy \
  --config=/etc/kubernetes/kube-proxy.conf \
  --v=2
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
EOF
cat > /etc/kubernetes/kube-proxy.conf << EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
clientConnection:
    acceptContentTypes: ""
    burst: 10
    contentType: application/vnd.kubernetes.protobuf
    kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
    qps: 5
clusterCIDR: "10.244.0.0/16"
configSyncPeriod: 15m0s
conntrack:
    max: null
    maxPerCore: 32768
    min: 131072
    tcpCloseWaitTimeout: 1h0m0s
    tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: ""
iptables:
    masqueradeAll: true
    masqueradeBit: 14
    minSyncPeriod: 0s
    syncPeriod: 30s
ipvs:
    excludeCIDRs: null
    minSyncPeriod: 0s
    scheduler: ""
    syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
resourceContainer: /kube-proxy
udpIdleTimeout: 250ms
EOF


#kubelet systemd unit
cat > ${SYSTEM_SERVICE_DIR}/kubelet.service << EOF 
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service
[Service]
ExecStart=/usr/local/bin/kubelet \
  --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig \
  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
  --config=/etc/kubernetes/kubelet-conf.yml \
  --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1 \
  --allow-privileged=true \
  --network-plugin=cni \
  --cni-conf-dir=/etc/cni/net.d \
  --cni-bin-dir=/opt/cni/bin \
  --cert-dir=/etc/kubernetes/pki \
  --v=2
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
EOF
cat > /etc/kubernetes/kubelet-conf.yml << EOF 
address: 0.0.0.0
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
cgroupDriver: systemd
cgroupsPerQOS: true
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
configMapAndSecretChangeDetectionStrategy: Watch
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
contentType: application/vnd.kubernetes.protobuf
cpuCFSQuota: true
cpuCFSQuotaPeriod: 100ms
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
enableControllerAttachDetach: true
enableDebuggingHandlers: true
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 20s
hairpinMode: promiscuous-bridge
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 20s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
iptablesDropBit: 15
iptablesMasqueradeBit: 14
kind: KubeletConfiguration
kubeAPIBurst: 10
kubeAPIQPS: 5
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeLeaseDurationSeconds: 40
nodeStatusReportFrequency: 1m0s
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
port: 10250
registryBurst: 10
registryPullQPS: 5
resolvConf: /etc/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
volumeStatsAggPeriod: 1m0s
EOF


#Distribute certificates and config files to the appropriate nodes
#The kube-apiserver unit must be edited on each master (advertise/bind address; see the sketch after these loops)
for node in k8s-m{2,3} k8s-n{1,2,3};do ssh root@$node mkdir -p ${K8S_DIR}/pki;done
for node in k8s-m{2,3};do scp -r ${K8S_DIR}/* root@$node:${K8S_DIR};done
for node in k8s-m{2,3};do scp ${SYSTEM_SERVICE_DIR}/{kube-apiserver.service,kube-scheduler.service,kube-controller-manager.service,kubelet.service,kube-proxy.service} root@$node:${SYSTEM_SERVICE_DIR};done
for node in k8s-n{1,2,3};do scp ${K8S_DIR}/pki/ca.pem root@$node:${K8S_DIR}/pki;done
for node in k8s-m{1,2,3};do ssh root@$node mkdir /root/.kube ${K8S_DIR}/manifests;ssh root@$node cp ${K8S_DIR}/admin.kubeconfig /root/.kube/config;done
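The per-master edit is not shown in the original; a hypothetical fix for k8s-m2 (k8s-m3 is analogous with 192.168.2.243):

ssh root@k8s-m2 "sed -i 's/--advertise-address=192.168.2.241/--advertise-address=192.168.2.242/;s/--bind-address=192.168.2.241/--bind-address=192.168.2.242/' /usr/lib/systemd/system/kube-apiserver.service && systemctl daemon-reload"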


#Start the control-plane services
for node in k8s-m{1,2,3};do ssh root@$node systemctl enable --now kube-apiserver kube-controller-manager kube-scheduler;done


#Verify the components are healthy

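The original screenshot is omitted; one way to check (the etcd members, scheduler and controller-manager should all report Healthy):

kubectl get cs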



#This installation enables TLS authentication, so each node's kubelet must hold a certificate issued by the kube-apiserver CA before it can talk to kube-apiserver. Signing a certificate by hand for every node is tedious, and it only gets harder to manage as nodes are added. TLS bootstrapping solves this: the kubelet first connects to kube-apiserver as a predefined low-privilege user, then submits a certificate signing request; once the bootstrap token matches, kube-apiserver dynamically signs and issues the node's kubelet certificate. For details see the TLS Bootstrapping and Authenticating with Bootstrap Tokens documentation.

#kubelet certificate and kubeconfig
cd ${K8S_DIR}/pki
#Create the TLS bootstrap secret used for automatic certificate signing
TOKEN_PUB=$(openssl rand -hex 3)
TOKEN_SECRET=$(openssl rand -hex 8)
BOOTSTRAP_TOKEN="${TOKEN_PUB}.${TOKEN_SECRET}"
kubectl -n kube-system create secret generic bootstrap-token-${TOKEN_PUB} \
        --type 'bootstrap.kubernetes.io/token' \
        --from-literal description="cluster bootstrap token" \
        --from-literal token-id=${TOKEN_PUB} \
        --from-literal token-secret=${TOKEN_SECRET} \
        --from-literal usage-bootstrap-authentication=true \
        --from-literal usage-bootstrap-signing=true
#Create the bootstrap kubeconfig
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=${KUBE_APISERVER} --kubeconfig=${K8S_DIR}/bootstrap-kubelet.kubeconfig
kubectl config set-credentials tls-bootstrap-token-user --token=${BOOTSTRAP_TOKEN} --kubeconfig=${K8S_DIR}/bootstrap-kubelet.kubeconfig
kubectl config set-context tls-bootstrap-token-user@kubernetes --cluster=kubernetes --user=tls-bootstrap-token-user --kubeconfig=${K8S_DIR}/bootstrap-kubelet.kubeconfig
kubectl config use-context tls-bootstrap-token-user@kubernetes --kubeconfig=${K8S_DIR}/bootstrap-kubelet.kubeconfig
#Authorize kubelets to create CSRs
kubectl create clusterrolebinding kubeadm:kubelet-bootstrap --clusterrole system:node-bootstrapper --group system:bootstrappers
#Auto-approve CSR requests from the system:bootstrappers group
cat <<EOF | kubectl apply -f -
# Approve all CSRs for the group "system:bootstrappers"
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: auto-approve-csrs-for-group
subjects:
- kind: Group
  name: system:bootstrappers
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
  apiGroup: rbac.authorization.k8s.io
EOF
#Allow kubelets to renew their own certificates
cat <<EOF | kubectl apply -f -
# Approve renewal CSRs for the group "system:nodes"
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: auto-approve-renewals-for-nodes
subjects:
- kind: Group
  name: system:nodes
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
  apiGroup: rbac.authorization.k8s.io
EOF
#Create the required ClusterRoles
cat <<EOF | kubectl apply -f -
# A ClusterRole which instructs the CSR approver to approve a user requesting
# node client credentials.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
rules:
- apiGroups: ["certificates.k8s.io"]
  resources: ["certificatesigningrequests/nodeclient"]
  verbs: ["create"]
---
# A ClusterRole which instructs the CSR approver to approve a node renewing its
# own client credentials.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
rules:
- apiGroups: ["certificates.k8s.io"]
  resources: ["certificatesigningrequests/selfnodeclient"]
  verbs: ["create"]
EOF


#kube-proxy certificate and kubeconfig
cat > ca-config.json << EOF
{"signing":{"default":{"expiry":"87600h"},"profiles":{"kubernetes":{"usages":["signing","key encipherment","server auth","client auth"],"expiry":"87600h"}}}}
EOF
cat > kube-proxy-csr.json << EOF
{"CN":"system:kube-proxy","key":{"algo": "rsa","size":2048},"names":[{"C":"CN","L":"BeiJing","ST":"BeiJing","O":"kubernetes","OU":"k8s"}]}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=${KUBE_APISERVER} --kubeconfig=${K8S_DIR}/kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy --client-certificate=kube-proxy.pem --client-key=kube-proxy-key.pem --embed-certs=true --kubeconfig=${K8S_DIR}/kube-proxy.kubeconfig
kubectl config set-context default --cluster=kubernetes --user=kube-proxy --kubeconfig=${K8S_DIR}/kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=${K8S_DIR}/kube-proxy.kubeconfig
rm -rf *.json *.csr


#Copy the config and unit files to the appropriate nodes
for node in k8s-m{2,3} k8s-n{1,2,3};do scp ${K8S_DIR}/{kube-proxy.kubeconfig,kube-proxy.conf,bootstrap-kubelet.kubeconfig,kubelet-conf.yml} root@$node:${K8S_DIR};done
for node in k8s-m{2,3} k8s-n{1,2,3};do scp ${SYSTEM_SERVICE_DIR}/{kubelet.service,kube-proxy.service} root@$node:${SYSTEM_SERVICE_DIR};done


#Start the kubelet and kube-proxy services
for node in k8s-{m,n}{1,2,3};do ssh root@$node systemctl enable --now kubelet kube-proxy;done


#Check node status

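The screenshot is omitted; the nodes should now be registered (they turn Ready once the flannel CNI below is deployed):

kubectl get node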


#Confirm kube-proxy is using IPVS mode
curl localhost:10249/proxyMode
ipvs


#Deploy flannel (change --iface to match your NIC)
cd ${SOFT_DIR} && mkdir flannel && cd flannel
cat > kube-flannel.yml << EOF 
---
apiVersion: extensions/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
    - configMap
    - secret
    - emptyDir
    - hostPath
  allowedHostPaths:
    - pathPrefix: "/etc/cni/net.d"
    - pathPrefix: "/etc/kube-flannel"
    - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
rules:
  - apiGroups: ['extensions']
    resources: ['podsecuritypolicies']
    verbs: ['use']
    resourceNames: ['psp.flannel.unprivileged']
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes/status
    verbs:
      - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-amd64
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      hostNetwork: true
      nodeSelector:
        beta.kubernetes.io/arch: amd64
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.11.0-amd64
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.11.0-amd64
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        - --iface=ens192
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
             add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
EOF

kubectl apply -f kube-flannel.yml


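The original screenshots are omitted; a quick check that the flannel pods are running and the nodes have turned Ready:

kubectl -n kube-system get pod -l app=flannel
kubectl get node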


#Deploy CoreDNS
yum -y install epel-release && yum -y install jq
cd ${SOFT_DIR} && mkdir coredns && cd coredns
wget https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/coredns.yaml.sed
wget https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/deploy.sh
chmod +x deploy.sh
./deploy.sh -i 10.96.0.10 > coredns.yml
kubectl apply -f coredns.yml


#Verify DNS is working

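The screenshot is omitted; a simple test using a disposable busybox pod (busybox:1.28 is chosen because its nslookup is known to behave):

kubectl run -it --rm dns-test --image=busybox:1.28 --restart=Never -- nslookup kubernetes.default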


#Add the NoSchedule taint to the master nodes
kubectl taint nodes k8s-m1 k8s-m2 k8s-m3 node-role.kubernetes.io/master="":NoSchedule
#Set the role labels
kubectl label node k8s-m1 k8s-m2 k8s-m3  node-role.kubernetes.io/master=master
kubectl label node k8s-n1 k8s-n2 k8s-n3  node-role.kubernetes.io/node=node

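The screenshot is omitted; the roles and taints can be confirmed with:

kubectl get node
kubectl describe node k8s-m1 | grep Taints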




Reposted from: https://blog.51cto.com/13740724/2393698
