Initial server setup

Disable the firewall

]# systemctl stop firewalld 
]# systemctl disable firewalld

Disable SELinux

]# sed -i 's/enforcing/disabled/' /etc/selinux/config # permanent (takes effect after reboot)
]# setenforce 0 # temporary (current session)

Disable swap

]# swapoff -a # temporary
]# sed -ri 's/.*swap.*/#&/' /etc/fstab # permanent
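
Verify that swap is fully off (the Swap line should show all zeros):
]# free -h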

Set the hostname according to your plan

]# hostnamectl set-hostname <hostname>
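
For example, on the first machine of the cluster planned below:
]# hostnamectl set-hostname k8s-master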

Kernel parameter tuning

]# cat > /etc/sysctl.d/k8s.conf << EOF
# Interval (seconds) between TCP keepalive probe messages
net.ipv4.tcp_keepalive_time=600
# Interval between individual keepalive probe packets
net.ipv4.tcp_keepalive_intvl=30
# Number of unanswered keepalive probes before the peer is considered dead
net.ipv4.tcp_keepalive_probes=10
# Disable IPv6 (set to 0 to enable)
net.ipv6.conf.all.disable_ipv6=1
net.ipv6.conf.default.disable_ipv6=1
net.ipv6.conf.lo.disable_ipv6=1
# ARP cache entry timeout
net.ipv4.neigh.default.gc_stale_time=120
# Default is 1 (strict reverse-path validation), which can cause packet loss
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
# Always use the best local address for the target IP as the ARP request source address
net.ipv4.conf.default.arp_announce=2
net.ipv4.conf.lo.arp_announce=2
net.ipv4.conf.all.arp_announce=2
# Min and max ports usable as source (local) ports; applies to both TCP and UDP
net.ipv4.ip_local_port_range=45001 65000
# 0 disables IP forwarding; 1 enables it
net.ipv4.ip_forward=1
# Cap on the number of TIME_WAIT sockets
net.ipv4.tcp_max_tw_buckets=6000
# Should be 1, to mitigate SYN flood attacks
net.ipv4.tcp_syncookies=1
# Retries of the SYN+ACK (second handshake) when no ACK (third handshake) arrives; default 5
net.ipv4.tcp_synack_retries=2
# Filter bridged IPv6 packets through ip6tables chains
net.bridge.bridge-nf-call-ip6tables=1
# Bridged (L2) frames are also filtered by iptables FORWARD rules, so L3 rules apply to L2 traffic
net.bridge.bridge-nf-call-iptables=1
# Connection tracking table size; suggested CONNTRACK_MAX = RAMSIZE (bytes) / 16384 / (x / 32),
# and nf_conntrack_max = 4 * nf_conntrack_buckets; default 262144
net.netfilter.nf_conntrack_max=2310720

net.ipv6.neigh.default.gc_thresh1=8192
net.ipv6.neigh.default.gc_thresh2=32768
net.ipv6.neigh.default.gc_thresh3=65536

# gc_thresh3 is the absolute limit on the neighbor table size
# gc_thresh2 should equal the maximum expected number of neighbor entries on the system
# gc_thresh3 should then be set higher than gc_thresh2, e.g. 25%-50% higher, as surge capacity
# gc_thresh1 can be raised; if the table holds fewer entries than gc_thresh1, the kernel never removes (times out) stale entries

# Per-CPU backlog queue length for network devices
net.core.netdev_max_backlog=16384
# Maximum socket receive buffer size, all protocols
net.core.rmem_max = 16777216
# Maximum socket send buffer size (max TCP send window)
net.core.wmem_max = 16777216
# Length of the SYN (first) backlog queue
net.ipv4.tcp_max_syn_backlog = 8096
# Length of the accept (second) backlog queue
net.core.somaxconn = 32768
# Maximum inotify instances per real user ID; default 128
fs.inotify.max_user_instances=8192
# Maximum watches a single user can add; default 8192
fs.inotify.max_user_watches=524288
# System-wide maximum number of file descriptors
fs.file-max=52706963
# Maximum number of file handles a single process can open
fs.nr_open=52706963
# Maximum number of process IDs
kernel.pid_max = 4194303
# Filter bridged ARP packets through the arptables FORWARD chain
net.bridge.bridge-nf-call-arptables=1
# Avoid swap; only use it when the system is about to OOM
vm.swappiness=0
# Do not check whether physical memory is sufficient
vm.overcommit_memory=1
# 0: do not panic on OOM; let the OOM killer handle it
vm.panic_on_oom=0
vm.max_map_count = 262144 
EOF
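
The net.bridge.* and net.netfilter.* keys fail to load unless the corresponding kernel modules are present; a minimal sketch to load them now and on boot, then apply the settings:
]# modprobe br_netfilter
]# modprobe nf_conntrack
]# cat > /etc/modules-load.d/k8s.conf << EOF
br_netfilter
nf_conntrack
EOF
]# sysctl --system
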
]# vim /etc/security/limits.conf   # append the following two lines
*	soft	nofile	65535
*	hard	nofile	65535

Configure hosts

]# cat >>  /etc/hosts << EOF
172.16.100.30 k8s-master  #IP+hostname
172.16.100.31 k8s-node1  #IP+hostname
172.16.100.32 k8s-node2  #IP+hostname
EOF 

Binary deployment of the k8s cluster

Generate SSL certificates with the cfssl tools

]# wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
]# wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
]# wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
]# mv cfssl_linux-amd64  /usr/local/bin/cfssl
]# mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
]# mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
]# chmod +x /usr/local/bin/cfssl*
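
Verify the tools are installed:
]# cfssl version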

Generate the etcd SSL certificates

]# mkdir -p /data/TLS/{etcd,k8s,calico}
]# cd  /data/TLS/etcd/
]# cat > ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "expiry": "87600h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}
EOF

]# cat > ca-csr.json << EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "ST": "Beijing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
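
This produces the CA certificate and key in the current directory:
]# ls ca*.pem
ca-key.pem  ca.pem
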
The hosts list must include every cluster IP; add more entries according to your architecture plan (JSON does not allow inline comments, so list them plainly):

]# cat > server-csr.json << EOF
{
  "CN": "kubernetes",
  "hosts": [
    "10.0.0.1",
    "127.0.0.1",
    "172.16.100.30",
    "172.16.100.31",
    "172.16.100.32",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server

Deploy the etcd cluster (download the etcd package first)

]# mkdir  -p  /data/etcd/{bin,logs,cfg,ssl}
]# tar -xf etcd-v3.4.9-linux-amd64.tar.gz && cd etcd-v3.4.9-linux-amd64
]# cp -r  etcd etcdctl  /data/etcd/bin/
]# cp -r  /data/TLS/etcd/ca*.pem  /data/TLS/etcd/server*.pem   /data/etcd/ssl/
]# cd /data/etcd/cfg
]# cat > etcd.conf << EOF
#[Member] 
ETCD_NAME="etcd-1" 
ETCD_DATA_DIR="/data/etcd/default.etcd" 
ETCD_LISTEN_PEER_URLS="https://172.16.100.30:2380" 
ETCD_LISTEN_CLIENT_URLS="https://172.16.100.30:2379" 
#[Clustering] 
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://172.16.100.30:2380" 
ETCD_ADVERTISE_CLIENT_URLS="https://172.16.100.30:2379" 
ETCD_INITIAL_CLUSTER="etcd-1=https://172.16.100.30:2380,etcd-2=https://172.16.100.31:2380,etcd-3=https://172.16.100.32:2380" 
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster" 
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

]# cat > /usr/lib/systemd/system/etcd.service << EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service] 
Type=notify 
EnvironmentFile=/data/etcd/cfg/etcd.conf 
ExecStart=/data/etcd/bin/etcd \
--cert-file=/data/etcd/ssl/server.pem \
--key-file=/data/etcd/ssl/server-key.pem \
--peer-cert-file=/data/etcd/ssl/server.pem \
--peer-key-file=/data/etcd/ssl/server-key.pem \
--trusted-ca-file=/data/etcd/ssl/ca.pem \
--peer-trusted-ca-file=/data/etcd/ssl/ca.pem
Restart=on-failure 
LimitNOFILE=65536 
[Install] 
WantedBy=multi-user.target
EOF

]# systemctl daemon-reload && systemctl enable etcd  && systemctl start etcd
Note: copy the /data/etcd directory and etcd.service to the other cluster servers, then just change the IPs and ETCD_NAME. The first node will appear to hang on startup until the other members join and the cluster reaches quorum.
ETCD_NAME: node name, unique within the cluster
ETCD_DATA_DIR: data directory
ETCD_LISTEN_PEER_URLS: peer (cluster) listen address
ETCD_LISTEN_CLIENT_URLS: client listen address
ETCD_INITIAL_ADVERTISE_PEER_URLS: advertised peer address
ETCD_ADVERTISE_CLIENT_URLS: advertised client address
ETCD_INITIAL_CLUSTER: addresses of all cluster nodes
ETCD_INITIAL_CLUSTER_TOKEN: cluster token
ETCD_INITIAL_CLUSTER_STATE: join state; new for a new cluster, existing to join an existing one
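
For example, on the second node (172.16.100.31) only these lines of etcd.conf change:
ETCD_NAME="etcd-2"
ETCD_LISTEN_PEER_URLS="https://172.16.100.31:2380"
ETCD_LISTEN_CLIENT_URLS="https://172.16.100.31:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://172.16.100.31:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://172.16.100.31:2379"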

Check and verify etcd cluster health

]# ETCDCTL_API=3 /data/etcd/bin/etcdctl \
--cacert=/data/etcd/ssl/ca.pem \
--cert=/data/etcd/ssl/server.pem \
--key=/data/etcd/ssl/server-key.pem \
--endpoints="https://172.16.100.30:2379,https://172.16.100.31:2379,https://172.16.100.32:2379" \
endpoint health
Output like the following shows the cluster was deployed successfully:
https://172.16.100.30:2379 is healthy: successfully committed proposal: took = 8.154404ms
https://172.16.100.32:2379 is healthy: successfully committed proposal: took = 9.044117ms
https://172.16.100.31:2379 is healthy: successfully committed proposal: took = 10.000825ms
Note: because etcd has TLS enabled, every etcdctl query must present the certificates, so it is convenient to add the following alias to /etc/bashrc:
alias etcdctl='ETCDCTL_API=3 /data/etcd/bin/etcdctl \
--cacert=/data/etcd/ssl/ca.pem  \
--cert=/data/etcd/ssl/server.pem \
--key=/data/etcd/ssl/server-key.pem \
--endpoints="https://172.16.100.30:2379,https://172.16.100.31:2379,https://172.16.100.32:2379"'

Deploy the Docker environment

]# wget https://download.docker.com/linux/static/stable/x86_64/docker-19.03.9.tgz
]# tar -xf docker-19.03.9.tgz
]# cp -r  docker/*  /usr/local/bin/
]# cat > /usr/lib/systemd/system/docker.service << EOF 
[Unit]
Description=Docker Application Container Engine 
Documentation=https://docs.docker.com 
After=network-online.target firewalld.service 
Wants=network-online.target
[Service]
Type=notify 
ExecStart=/usr/local/bin/dockerd
ExecReload=/bin/kill -s HUP \$MAINPID
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
Delegate=yes
KillMode=process
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target 
EOF
]# mkdir /etc/docker  /data/docker
]# cat > /etc/docker/daemon.json << EOF
{
  "registry-mirrors": ["https://b9pmyelo.mirror.aliyuncs.com"],
  "data-root": "/data/docker"
}
EOF
]# systemctl daemon-reload && systemctl enable docker && systemctl start docker
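Confirm the daemon is running and using the configured data root:
]# docker info | grep -i "docker root dir"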
Note: run the Docker steps above on every machine in the cluster; machines with internet access can also install Docker via yum.

Deploy the kube-apiserver component on the master

Generate the kube-apiserver SSL certificates

]# cd /data/TLS/k8s
]# cat > ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF

]# cat > ca-csr.json << EOF
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

]# cat > server-csr.json << EOF
{
    "CN": "kubernetes",
    "hosts": [
      "10.0.0.1",
      "127.0.0.1",
      "172.16.100.30",
      "172.16.100.31",
      "172.16.100.32",
      "kubernetes",
      "kubernetes.default",
      "kubernetes.default.svc",
      "kubernetes.default.svc.cluster",
      "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
]# mkdir -p /data/kubernetes/{bin,cfg,logs,ssl}
]# cp -r ca*.pem server*.pem  /data/kubernetes/ssl/
]# wget https://dl.k8s.io/v1.20.15/kubernetes-server-linux-amd64.tar.gz
]# tar -xf  kubernetes-server-linux-amd64.tar.gz
]# cp -r  kubernetes/server/bin/kube*  /data/kubernetes/bin/
]# cp -r  kubernetes/server/bin/kube*  /usr/local/bin/
]# cd /data/kubernetes/cfg
]# cat > kube-apiserver.conf << EOF
KUBE_APISERVER_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/data/kubernetes/logs \
--etcd-servers=https://172.16.100.30:2379,https://172.16.100.31:2379,https://172.16.100.32:2379 \
--bind-address=172.16.100.30 \
--secure-port=6443 \
--advertise-address=172.16.100.30 \
--allow-privileged=true \
--service-cluster-ip-range=10.0.0.0/24 \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
--authorization-mode=RBAC,Node \
--enable-bootstrap-token-auth \
--token-auth-file=/data/kubernetes/cfg/token.csv \
--service-node-port-range=30000-32767 \
--kubelet-client-certificate=/data/kubernetes/ssl/server.pem \
--kubelet-client-key=/data/kubernetes/ssl/server-key.pem \
--tls-cert-file=/data/kubernetes/ssl/server.pem \
--tls-private-key-file=/data/kubernetes/ssl/server-key.pem \
--client-ca-file=/data/kubernetes/ssl/ca.pem \
--service-account-key-file=/data/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/data/etcd/ssl/ca.pem \
--etcd-certfile=/data/etcd/ssl/server.pem \
--etcd-keyfile=/data/etcd/ssl/server-key.pem \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/data/kubernetes/logs/kubernetes-audit.log"
EOF

]# head -c 16 /dev/urandom | od -An -t x | tr -d ' '   ## generate a token
]# cat > token.csv << EOF        ## paste the token string generated above into the file
c47ffb939f5ca36231d9e3121a252940,kubelet-bootstrap,10001,"system:node-bootstrapper"
EOF
The format is token,user,uid,"group". The same token must be reused later when generating bootstrap.kubeconfig.

]# cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
[Unit] 
Description=Kubernetes API Server 
Documentation=https://github.com/kubernetes/kubernetes
 
[Service] 
EnvironmentFile=/data/kubernetes/cfg/kube-apiserver.conf 
ExecStart=/data/kubernetes/bin/kube-apiserver \$KUBE_APISERVER_OPTS 
Restart=on-failure
 
[Install] 
WantedBy=multi-user.target
EOF

]# systemctl daemon-reload && systemctl enable kube-apiserver && systemctl start kube-apiserver
]# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
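Optionally, confirm the apiserver is listening on the secure port:
]# ss -lntp | grep 6443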

Deploy the kube-controller-manager component on the master

]# cat > kube-controller-manager.conf << EOF
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/data/kubernetes/logs \
--leader-elect=true \
--master=127.0.0.1:8080 \
--bind-address=127.0.0.1 \
--allocate-node-cidrs=true \
--cluster-cidr=10.244.0.0/16 \
--service-cluster-ip-range=10.0.0.0/24 \
--cluster-signing-cert-file=/data/kubernetes/ssl/ca.pem \
--cluster-signing-key-file=/data/kubernetes/ssl/ca-key.pem \
--root-ca-file=/data/kubernetes/ssl/ca.pem \
--service-account-private-key-file=/data/kubernetes/ssl/ca-key.pem \
--experimental-cluster-signing-duration=87600h0m0s"
EOF

]# cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF
[Unit] 
Description=Kubernetes Controller Manager 
Documentation=https://github.com/kubernetes/kubernetes 

[Service] 
EnvironmentFile=/data/kubernetes/cfg/kube-controller-manager.conf 
ExecStart=/data/kubernetes/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
 
[Install] 
WantedBy=multi-user.target
EOF

]# systemctl daemon-reload && systemctl enable kube-controller-manager && systemctl start kube-controller-manager

Deploy the kube-scheduler component on the master

]# cat > kube-scheduler.conf << EOF
KUBE_SCHEDULER_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/data/kubernetes/logs \
--leader-elect \
--master=127.0.0.1:8080 \
--bind-address=127.0.0.1"
EOF

]# cat > /usr/lib/systemd/system/kube-scheduler.service << EOF
[Unit]
Description=Kubernetes Scheduler 
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/data/kubernetes/cfg/kube-scheduler.conf 
ExecStart=/data/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

]# systemctl daemon-reload && systemctl enable kube-scheduler && systemctl start kube-scheduler
]# kubectl get cs  # check cluster status; scheduler, controller-manager, and etcd should all report Healthy

Deploy the kubelet component on the master

]# cat > kubelet.conf << EOF
KUBELET_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/data/kubernetes/logs \
--hostname-override=k8s-master \
--network-plugin=cni \
--kubeconfig=/data/kubernetes/cfg/kubelet.kubeconfig \
--bootstrap-kubeconfig=/data/kubernetes/cfg/bootstrap.kubeconfig \
--config=/data/kubernetes/cfg/kubelet-config.yml \
--cert-dir=/data/kubernetes/ssl \
--pod-infra-container-image=lizhenliang/pause-amd64:3.0"
EOF

]# cat > kubelet-config.yml << EOF
kind: KubeletConfiguration 
apiVersion: kubelet.config.k8s.io/v1beta1
address: 0.0.0.0 
port: 10250 
readOnlyPort: 10255 
cgroupDriver: cgroupfs 
clusterDNS:
- 10.0.0.2 
clusterDomain: cluster.local
failSwapOn: false 
authentication:
  anonymous:
    enabled: false 
  webhook: 
    cacheTTL: 2m0s 
    enabled: true 
  x509: 
    clientCAFile: /data/kubernetes/ssl/ca.pem 
authorization: 
  mode: Webhook 
  webhook: 
    cacheAuthorizedTTL: 5m0s 
    cacheUnauthorizedTTL: 30s 
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
maxOpenFiles: 1000000 
maxPods: 110
EOF

Generate the bootstrap.kubeconfig file

]# KUBE_APISERVER="https://172.16.100.30:6443"
]# TOKEN="5a7a05908e6fff5ea912d4a2758be58c"   # must match the token in token.csv
]# kubectl config set-cluster kubernetes --certificate-authority=/data/kubernetes/ssl/ca.pem  --embed-certs=true --server=${KUBE_APISERVER} --kubeconfig=bootstrap.kubeconfig
]# kubectl config set-credentials "kubelet-bootstrap" --token=${TOKEN} --kubeconfig=bootstrap.kubeconfig
]# kubectl config set-context default --cluster=kubernetes --user="kubelet-bootstrap" --kubeconfig=bootstrap.kubeconfig
]# kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
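kubelet.conf above points at /data/kubernetes/cfg/bootstrap.kubeconfig, so copy the generated file there (assuming it was created in the current directory):
]# cp bootstrap.kubeconfig /data/kubernetes/cfg/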
]# cat > /usr/lib/systemd/system/kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet
After=docker.service
[Service]
EnvironmentFile=/data/kubernetes/cfg/kubelet.conf
ExecStart=/data/kubernetes/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF

]# systemctl daemon-reload && systemctl enable kubelet && systemctl start kubelet
]# kubectl get csr   # view pending kubelet certificate requests
]# kubectl certificate approve <csrNAME>
]# kubectl get node   # check node status

Deploy the kube-proxy component on the master

Generate the kube-proxy SSL certificate

]# cat > /data/TLS/k8s/kube-proxy-csr.json << EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN", 
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
]# cd /data/TLS/k8s
]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
]# cp -r  /data/TLS/k8s/kube-proxy*.pem  /data/kubernetes/ssl/
]# cd /data/kubernetes/cfg
]# cat > kube-proxy.conf << EOF
KUBE_PROXY_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/data/kubernetes/logs \
--config=/data/kubernetes/cfg/kube-proxy-config.yml"
EOF
]# cat > kube-proxy-config.yml << EOF
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
metricsBindAddress: 0.0.0.0:10249
clientConnection:
  kubeconfig: /data/kubernetes/cfg/kube-proxy.kubeconfig
hostnameOverride: k8s-master
clusterCIDR: 10.244.0.0/16   # pod CIDR; should match the controller-manager --cluster-cidr
EOF

Generate the kube-proxy.kubeconfig file

]# KUBE_APISERVER="https://172.16.100.30:6443"
]# kubectl config set-cluster kubernetes --certificate-authority=/data/kubernetes/ssl/ca.pem  --embed-certs=true --server=${KUBE_APISERVER} --kubeconfig=kube-proxy.kubeconfig
]# kubectl config set-credentials kube-proxy --client-certificate=/data/kubernetes/ssl/kube-proxy.pem --client-key=/data/kubernetes/ssl/kube-proxy-key.pem --embed-certs=true --kubeconfig=kube-proxy.kubeconfig
]# kubectl config set-context default --cluster=kubernetes --user="kube-proxy" --kubeconfig=kube-proxy.kubeconfig
]# kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
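Copy the kubeconfig into place and create the kube-proxy unit file. The unit is not shown in this master section; the sketch below mirrors the worker-node unit later in this document:
]# cp kube-proxy.kubeconfig /data/kubernetes/cfg/
]# cat > /usr/lib/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=/data/kubernetes/cfg/kube-proxy.conf
ExecStart=/data/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF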

]# systemctl daemon-reload && systemctl enable kube-proxy && systemctl start kube-proxy

Deploy the Calico CNI network

]# mkdir -p /data/calico/{ssl,logs,cfg}
]# cd  /data/calico/cfg
]# curl -o calico-etcd.yml https://docs.projectcalico.org/manifests/calico-etcd.yaml
]# vim calico-etcd.yml
---
apiVersion: v1
kind: Secret
type: Opaque
metadata:
  name: calico-etcd-secrets
  namespace: kube-system
data:
  # The etcd SSL certificates (ca.pem, server.pem, server-key.pem) must be base64-encoded with base64 -w 0; paste each result below.
  # e.g.: cat ca.pem | base64 -w 0
  etcd-key: "LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcFFJQkFBS0NBUUVBNGNUY2Y4em9rQXdZRVFXSnhHQjFPNzVaVWF5U1hlTExCQm5DSmdhSUtJYy83VXJrCjkrWHFZbkJvUm1JNXQzUjFGMlZnemhnZ2N0NnZKUGhaUGpFRGh2anI1NEptaVZ4UGNFZTRPQlJjTHA3dTZJL1IKV3puRXVDZm5zWXBlamgxT0hsbXZ1RFlPN2JudGJGQllXQUxjT2F5NFdzMmNBazZuelFlNHZycFBZcG03ZWN1aQp2bmx4VkFFRU1lV05LWm93RWNzbis2S2FOMjNxSHpZdWRwNUJZbDBiTzZNVzREeEJSMVYxei8raWkrTFRGVnN0CmVWQmNlSGJoNVZrblloZ1lGV0tCU3ZxalRoeTFVbVhyZitDRXppSVhOT0U0VFlCNVdhTWhWZkZ2QmpBYzVEOHEKUG1MS3VLZ2tNSlo5dTRwV0pVTnpuN250UXQ5RG9BOHUrL0w2ZXdJREFRQUJBb0lCQVFDcTlRRHJIV1MxUHhNeQpRSGxUNUo2aFFNQXQ0bmxxOG9NOGRhellVblhrQ3BaVHZ6U21xc2pUQmI5UUhLMEx4L21xWDYvd0g4RGlldEV4Ck00V1FYRmtKYVpCbzNBdDgxQk9yT0FPOUkxMnlSOU1zODBwYXcrRzhlU3N1KzFJaVZ2cUNiUE5za0RLNXZPS1YKOURrUlhBa2EremtXT1Q5N1Y4Z2tyMFlyMXJTcU9jYmhxaWhNRWpBUTNiM3VSN1BDc01WMU9aSTJyV0xYeTlObAoxU0tIeDVvMXFRSFlaWHFaUnk0ZU9xOHFOdVhLeDZPSDZhdzJwcGVaYS9HcmJ6dm1GdWpvS0VQV1FsM0NlSng4CjdmSkU5WXg0dldvVlVxTkpxZHdxUjRiSlNZbWMxdVVkRzJNVkRCS0dHTnZiU0o4RVZCUzNUR0k2Z2QzWTN6eGQKbWVoT3hvcUJBb0dCQVBTRm40ZU51MjlsT1V3cWxkckorSlZHZUtlYlMrMEZTMVU2Wk4vTUpqUWFTTVJ5bVBhZwpZOHdNaEFvemlEQjBjSWJBS1ZGQWpBQXdZYWdSbWRSZm1vdEZVVjV2ZmwvM0hXcnNvOHhPamhlaFp0cml2ZHRvCmdrdlcyTUR4RU8vSDZFTkZ2VlI3RUtTbldNVCtFOXlzV0orZmlGVklFVjlsSDBZaUxIZjl6bnFiQW9HQkFPeGQKNHhJMm1aRU1ENi8xM2VzMWRsVnI3S1NXbStXZFJvVStrd0trN1kzZzdBMlJRaElrVks0SFlmOStMM1BRODhHbQoxQTVNdExxS0NGclFlTjhwWkc3VFYzMHVkeUtqOE5kMHpHbUY4Y0hySXFrNW9aSFhOcXBKTzR6b2tFd0pocjVwCnE1dTNZTUtSWS84Q2MwNThjb2ZIU25lL0ZtdCs4ei8yQWMwSmVnMmhBb0dBYW5na1ZtbW9TNERQeWhKZzNidEQKdWZ2TlhXMkpTZE1jVWlmeTlGOTM0d2Z1MTFydXI3UjJ2OHBUVS8zTU53ejhVakFwelc5RmhtK0tsaHZUMTEwcApkYXJoR3pXQTJWaElQdDU3RStMQWpCbURKNXZDLzE0cUhjdVc1YXdScTlabms2TXlKUzdRdUdFRmpnRHp0UXAyCkxFclNtZytmUU9KUEU4S2Rpa0hCUGpFQ2dZRUEwT0NMT040dFNVUEtYU28rYVl2K1BiQzVHQjNNT05hS3FsZEkKM081WXk2ZDNrdW5KNUhSY3JNbnpiUy9heVZOZkJjUGk0NXdmbmpVNit0Mzk0dUFXVStYS0MrTFMvemEzTC8rVQpZTEF3bTdpcUViZlBNeTFucm9ZMjdPZmNGSVhhb0V5TGpYazVOZGY3OFMvK0s5N0g2M3RQTUpFYVEvYVZDZkhoClY0dEhZK0VDZ1lFQXFCeW1qaW43ZWVlVHBMMU95OTR1bGVqTnNzZy8zcmQrTk90eHVxdG1TZ2hOTW5aS3ZtQWoKNWYzMElPbXNGNUhoQU9aRDlFNTIxcDNOMGpVeXRGS25wSVVPMmlxWTVwNTNDWFVMRUxZNHFhZDlZdGZ6SHEvOQp3cG1zOTB2ZlhQKy9Kd1NQWUpsM0prZ1g4THVQck9ibnQ4eFB5ckVjekZzZHh5bmZLcmpacFRzPQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo="
  etcd-cert: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURyekNDQXBlZ0F3SUJBZ0lVSEd0MDRQQUM2dnRuMkQwMHprVk9GTFZ4KzJJd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1F6RUxNQWtHQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFXcHBibWN4RURBT0JnTlZCQWNUQjBKbAphV3BwYm1jeEVEQU9CZ05WQkFNVEIyVjBZMlFnUTBFd0hoY05Nakl3TXpJek1EWXdNREF3V2hjTk16SXdNekl3Ck1EWXdNREF3V2pCQU1Rc3dDUVlEVlFRR0V3SkRUakVRTUE0R0ExVUVDQk1IUW1WcFNtbHVaekVRTUE0R0ExVUUKQnhNSFFtVnBTbWx1WnpFTk1Bc0dBMVVFQXhNRVpYUmpaRENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUApBRENDQVFvQ2dnRUJBT0hFM0gvTTZKQU1HQkVGaWNSZ2RUdStXVkdza2wzaXl3UVp3aVlHaUNpSFArMUs1UGZsCjZtSndhRVppT2JkMGRSZGxZTTRZSUhMZXJ5VDRXVDR4QTRiNDYrZUNab2xjVDNCSHVEZ1VYQzZlN3VpUDBWczUKeExnbjU3R0tYbzRkVGg1WnI3ZzJEdTI1N1d4UVdGZ0MzRG1zdUZyTm5BSk9wODBIdUw2NlQyS1p1M25Mb3I1NQpjVlFCQkRIbGpTbWFNQkhMSi91aW1qZHQ2aDgyTG5hZVFXSmRHenVqRnVBOFFVZFZkYy8vb292aTB4VmJMWGxRClhIaDI0ZVZaSjJJWUdCVmlnVXI2bzA0Y3RWSmw2My9naE00aUZ6VGhPRTJBZVZtaklWWHhid1l3SE9RL0tqNWkKeXJpb0pEQ1dmYnVLVmlWRGM1KzU3VUxmUTZBUEx2dnkrbnNDQXdFQUFhT0JuVENCbWpBT0JnTlZIUThCQWY4RQpCQU1DQmFBd0hRWURWUjBsQkJZd0ZBWUlLd1lCQlFVSEF3RUdDQ3NHQVFVRkJ3TUNNQXdHQTFVZEV3RUIvd1FDCk1BQXdIUVlEVlIwT0JCWUVGQm4zSUZ5eWFXWUMrVXJaOW5XUnF1YVJGd0dyTUI4R0ExVWRJd1FZTUJhQUZPYU0KTU0rZ0pnNFRqUUZ2dmNVOXJxY0FjNXAwTUJzR0ExVWRFUVFVTUJLSEJLd1FaQjZIQkt3UVpCK0hCS3dRWkNBdwpEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBR25Zalp0OWVFMTJXUlJyRVgxTVpHQWZ4NHNTVHlGa2FqeWQvYTlsCnNiSU42UHN2R1VERjFRbnlNaE1raVdubHc0UFdnZTN0b1pRdGpGYTYyMzlTdTdSVWI1d1FldWZWSkFXcmRwT08KTitOVjZJazVSWWpGdzFDUEFWaE5WN0IwVS9BUHVKOWhBR3N5Ui9VdHJ6ekZ4SWVIc25rTTY2RDN5M25QVFdTVgpFa1lZejdIcU5zb1lOSW1MckpHbmFCM2o5OUFLSG4zanJ4cXU4bDduYy9EcGpkNDhZRUM4WXBFejZJTDAzcnRWCkpZN2JuQUVScE9yYmJCbWZvck9wRWEzRUpYOEh6VStTSTBwVHA0dXQ3RUpsR3h3ZHgxbDhiU1kwakZFNkxzTDAKZS9pMmljdldnNlJFRU53emlPWkxlRXY3WmN2bHEzaktKcFMxWWwvN2NETk5QQmM9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K"
  etcd-ca: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURWakNDQWo2Z0F3SUJBZ0lVVFcraEs4NmNoOGFhQzdVNFg2MCtVdmw3eXlBd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1F6RUxNQWtHQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFXcHBibWN4RURBT0JnTlZCQWNUQjBKbAphV3BwYm1jeEVEQU9CZ05WQkFNVEIyVjBZMlFnUTBFd0hoY05Nakl3TXpJek1ETTFNREF3V2hjTk1qY3dNekl5Ck1ETTFNREF3V2pCRE1Rc3dDUVlEVlFRR0V3SkRUakVRTUE0R0ExVUVDQk1IUW1WcGFtbHVaekVRTUE0R0ExVUUKQnhNSFFtVnBhbWx1WnpFUU1BNEdBMVVFQXhNSFpYUmpaQ0JEUVRDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRApnZ0VQQURDQ0FRb0NnZ0VCQUxLV2pvWWR5M2NacUdDbk12blFQdGJPM2N2OUJXaGtVd0hrcENlZlM5cU5aenNECisrWUhhVElxL3V3U3cwL2g0aXRxc0RzZloxb0ZVYTd1dFU2Rzd2WEtWZXp2TC9ReTNETVQyY2lQWXNUWFJqVXEKbDQ1eWFVRnFGSkJTUklZWmcrci8ycmI1U3ROM2JTdlAvMnNvTVhHcUphaHM2aGdoKzVLRm5pRkwva3kwcVdGaQpLcFB2am1lUFpuTTJCTi9PaGdJbUczZW1wUytPdDBYS1NxNkNlNzkzS0hzSlJlRHAxRE5mTjRJZjFvUFFJclkrCnBTbjBRbVdFNUlqYXdjZEJlVWtPUFZEQWltVEY4V09wQjNVdU91SVFBMHdycHRQNnRidXVaQjZKUUlIaWtHT2kKSzhYMm91YTNHWkF6K1ByRGhYUElsaXpFTDBzT1FvYU8rdGVlVDQwQ0F3RUFBYU5DTUVBd0RnWURWUjBQQVFILwpCQVFEQWdFR01BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZPYU1NTStnSmc0VGpRRnZ2Y1U5CnJxY0FjNXAwTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFBc21Pc1dEdWVvZEI2bEZ5RzRiRlp2eG1jWEJ5ZUMKVmlrZmdJQy9WVTNTQkZ2ZUk2M1JRY1ZoVVcvM3N0cWhPdmNCK0doRGMreklLY1hRMWJEQi85RWI1Y09XeUhOYgpoMDZmTE9mWTFJNjcwMDZqaVA3ZTdLNVlGSWdudjVzS2d1OENWelhjVUd2OXN3QjRnWGtzTEdYQVR4SURJSDBxCjNKQ3o5TEVrdkpKUDdnd0VHR0RBRHk2SEk0TXhDbmdKUDUvUEpvaXQ5NFhFdWI5TWwrdjY1b1gzT09WTVU3RG0KNlhWRFR2dzVCWWlVOTNFRG4rZ2xsWDJweFoybTgwbGpFc0Q0R1BZb2ptU0RlYXZpajA5K1U3cEVLN3JYRUJsdQpsMHl6eVIvNnNtK29OaHhPVUg0V0tiVmVpVWYvd3YyR1p6eldwN01tTWs5N2lQNlA3VVozVm5hbAotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg=="  
  
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
data:
  etcd_endpoints: "https://172.16.100.30:2379,https://172.16.100.31:2379,https://172.16.100.32:2379" # the etcd cluster endpoints
  etcd_ca: "/calico-secrets/etcd-ca"   # "/calico-secrets/etcd-ca"
  etcd_cert: "/calico-secrets/etcd-cert" # "/calico-secrets/etcd-cert"
  etcd_key: "/calico-secrets/etcd-key"  # "/calico-secrets/etcd-key"

  typha_service_name: "none"

  calico_backend: "bird"
  veth_mtu: "0"

  cni_network_config: |-
    {
      "name": "k8s-pod-network",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "calico",
          "log_level": "info",
          "log_file_path": "/data/calico/logs/cni.log",
          "etcd_endpoints": "__ETCD_ENDPOINTS__",
          "etcd_key_file": "__ETCD_KEY_FILE__",
          "etcd_cert_file": "__ETCD_CERT_FILE__",
          "etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__",
          "mtu": __CNI_MTU__,
          "ipam": {
              "type": "calico-ipam"
          },
          "policy": {
              "type": "k8s"
          },
          "kubernetes": {
              "kubeconfig": "__KUBECONFIG_FILEPATH__"
          }
        },
        {
          "type": "portmap",
          "snat": true,
          "capabilities": {"portMappings": true}
        },
        {
          "type": "bandwidth",
          "capabilities": {"bandwidth": true}
        }
      ]
    }

---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-kube-controllers
rules:
  - apiGroups: [""]
    resources:
      - pods
      - nodes
      - namespaces
      - serviceaccounts
    verbs:
      - watch
      - list
      - get
  - apiGroups: ["networking.k8s.io"]
    resources:
      - networkpolicies
    verbs:
      - watch
      - list
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-kube-controllers
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-kube-controllers
subjects:
- kind: ServiceAccount
  name: calico-kube-controllers
  namespace: kube-system

---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-node
rules:
  - apiGroups: [""]
    resources:
      - pods
      - nodes
      - namespaces
    verbs:
      - get
  - apiGroups: ["discovery.k8s.io"]
    resources:
      - endpointslices
    verbs:
      - watch 
      - list
  - apiGroups: [""]
    resources:
      - endpoints
      - services
    verbs:
      - watch
      - list
  - apiGroups: [""]
    resources:
      - configmaps
    verbs:
      - get
  - apiGroups: [""]
    resources:
      - nodes/status
    verbs:
      - patch

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: calico-node
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-node
subjects:
- kind: ServiceAccount
  name: calico-node
  namespace: kube-system

---
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    k8s-app: calico-node
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        k8s-app: calico-node
    spec:
      nodeSelector:
        kubernetes.io/os: linux
      hostNetwork: true
      tolerations:
        - effect: NoSchedule
          operator: Exists
        - key: CriticalAddonsOnly
          operator: Exists
        - effect: NoExecute
          operator: Exists
      serviceAccountName: calico-node
      terminationGracePeriodSeconds: 0
      priorityClassName: system-node-critical
      initContainers:
        - name: install-cni
          image: docker.io/calico/cni:v3.22.1
          command: ["/opt/cni/bin/install"]
          envFrom:
          - configMapRef:
              name: kubernetes-services-endpoint
              optional: true
          env:
            - name: CNI_CONF_NAME
              value: "10-calico.conflist"
            - name: CNI_NETWORK_CONFIG
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: cni_network_config
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            - name: CNI_MTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            - name: SLEEP
              value: "false"
          volumeMounts:
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
            - mountPath: /calico-secrets
              name: etcd-certs
          securityContext:
            privileged: true
        - name: flexvol-driver
          image: docker.io/calico/pod2daemon-flexvol:v3.22.1
          volumeMounts:
          - name: flexvol-driver-host
            mountPath: /host/driver
          securityContext:
            privileged: true
      containers:
        - name: calico-node
          image: docker.io/calico/node:v3.22.1
          envFrom:
          - configMapRef:
              name: kubernetes-services-endpoint
              optional: true
          env:
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            - name: ETCD_CA_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_ca
            - name: ETCD_KEY_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_key
            - name: ETCD_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_cert
            - name: CALICO_K8S_NODE_REF
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: CALICO_NETWORKING_BACKEND
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: calico_backend
            - name: CLUSTER_TYPE
              value: "k8s,bgp"
            - name: IP
              value: "autodetect"
            - name: CALICO_IPV4POOL_IPIP
              value: "Always"
            - name: CALICO_IPV4POOL_VXLAN
              value: "Never"
            - name: FELIX_IPINIPMTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            - name: FELIX_VXLANMTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            - name: FELIX_WIREGUARDMTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # Pod network CIDR; should match the controller-manager --cluster-cidr
            - name: CALICO_IPV4POOL_CIDR
              value: "10.244.0.0/16"
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
              value: "ACCEPT"
            - name: FELIX_IPV6SUPPORT
              value: "false"
            - name: FELIX_HEALTHENABLED
              value: "true"
            # Additionally, add the following:
            - name: KUBERNETES_SERVICE_HOST
              value: "172.16.100.30"
            - name: KUBERNETES_SERVICE_PORT
              value: "6443"
            - name: KUBERNETES_SERVICE_PORT_HTTPS
              value: "6443"
          securityContext:
            privileged: true
          resources:
            requests:
              cpu: 250m
          lifecycle:
            preStop:
              exec:
                command:
                - /bin/calico-node
                - -shutdown
          livenessProbe:
            exec:
              command:
              - /bin/calico-node
              - -felix-live
              - -bird-live
            periodSeconds: 10
            initialDelaySeconds: 10
            failureThreshold: 6
            timeoutSeconds: 10
          readinessProbe:
            exec:
              command:
              - /bin/calico-node
              - -felix-ready
              - -bird-ready
            periodSeconds: 10
            timeoutSeconds: 10
          volumeMounts:
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
              readOnly: false
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /run/xtables.lock
              name: xtables-lock
              readOnly: false
            - mountPath: /var/run/calico
              name: var-run-calico
              readOnly: false
            - mountPath: /var/lib/calico
              name: var-lib-calico
              readOnly: false
            - mountPath: /calico-secrets
              name: etcd-certs
            - name: policysync
              mountPath: /var/run/nodeagent
            - name: sysfs
              mountPath: /sys/fs/
              mountPropagation: Bidirectional
            - name: cni-log-dir
              mountPath: /var/log/calico/cni
              readOnly: true
      volumes:
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
        - name: var-lib-calico
          hostPath:
            path: /var/lib/calico
        - name: xtables-lock
          hostPath:
            path: /run/xtables.lock
            type: FileOrCreate
        - name: sysfs
          hostPath:
            path: /sys/fs/
            type: DirectoryOrCreate
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d
        - name: cni-log-dir
          hostPath:
            path: /var/log/calico/cni
        - name: etcd-certs
          secret:
            secretName: calico-etcd-secrets
            defaultMode: 0400
        - name: policysync
          hostPath:
            type: DirectoryOrCreate
            path: /var/run/nodeagent
        - name: flexvol-driver-host
          hostPath:
            type: DirectoryOrCreate
            path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-node
  namespace: kube-system

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: calico-kube-controllers
  namespace: kube-system
  labels:
    k8s-app: calico-kube-controllers
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: calico-kube-controllers
  strategy:
    type: Recreate
  template:
    metadata:
      name: calico-kube-controllers
      namespace: kube-system
      labels:
        k8s-app: calico-kube-controllers
    spec:
      nodeSelector:
        kubernetes.io/os: linux
      tolerations:
        - key: CriticalAddonsOnly
          operator: Exists
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      serviceAccountName: calico-kube-controllers
      priorityClassName: system-cluster-critical
      hostNetwork: true
      containers:
        - name: calico-kube-controllers
          image: docker.io/calico/kube-controllers:v3.22.1
          env:
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            - name: ETCD_CA_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_ca
            - name: ETCD_KEY_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_key
            - name: ETCD_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_cert
            - name: ENABLED_CONTROLLERS
              value: policy,namespace,serviceaccount,workloadendpoint,node
          volumeMounts:
            - mountPath: /calico-secrets
              name: etcd-certs
          livenessProbe:
            exec:
              command:
              - /usr/bin/check-status
              - -l
            periodSeconds: 10
            initialDelaySeconds: 10
            failureThreshold: 6
            timeoutSeconds: 10
          readinessProbe:
            exec:
              command:
              - /usr/bin/check-status
              - -r
            periodSeconds: 10
      volumes:
        - name: etcd-certs
          secret:
            secretName: calico-etcd-secrets
            defaultMode: 0440

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-kube-controllers
  namespace: kube-system

---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: calico-kube-controllers
  namespace: kube-system
  labels:
    k8s-app: calico-kube-controllers
spec:
  maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: calico-kube-controllers


]# kubectl apply -f  calico-etcd.yml
]# kubectl get pods -n kube-system
]# kubectl get node

## Authorize the apiserver to access the kubelet
]# cat > apiserver-to-kubelet-rbac.yml << EOF
apiVersion: rbac.authorization.k8s.io/v1 
kind: ClusterRole
metadata: 
  annotations: 
    rbac.authorization.kubernetes.io/autoupdate: "true" 
  labels: 
    kubernetes.io/bootstrapping: rbac-defaults 
  name: system:kube-apiserver-to-kubelet 
rules: 
  - apiGroups: 
      - "" 
    resources: 
      - nodes/proxy 
      - nodes/stats 
      - nodes/log 
      - nodes/spec 
      - nodes/metrics 
      - pods/log 
    verbs: 
      - "*" 
--- 
apiVersion: rbac.authorization.k8s.io/v1 
kind: ClusterRoleBinding 
metadata: 
  name: system:kube-apiserver 
  namespace: "" 
roleRef: 
  apiGroup: rbac.authorization.k8s.io 
  kind: ClusterRole 
  name: system:kube-apiserver-to-kubelet 
subjects:
  - apiGroup: rbac.authorization.k8s.io 
    kind: User 
    name: kubernetes
EOF
]# kubectl apply -f apiserver-to-kubelet-rbac.yml

Deploy the kubelet and kube-proxy components on a worker node

## Copy the kubernetes directory from the master, then change the hostname in kubelet.conf and kube-proxy-config.yml
## Generate a new kubelet certificate and new kubeconfig files

]# KUBE_APISERVER="https://172.16.100.30:6443"
]# TOKEN="5a7a05908e6fff5ea912d4a2758be58c"   # must match the token in token.csv on the master
]# kubectl config set-cluster kubernetes --certificate-authority=/data/kubernetes/ssl/ca.pem  --embed-certs=true --server=${KUBE_APISERVER} --kubeconfig=bootstrap.kubeconfig
]# kubectl config set-credentials "kubelet-bootstrap" --token=${TOKEN} --kubeconfig=bootstrap.kubeconfig
]# kubectl config set-context default --cluster=kubernetes --user="kubelet-bootstrap" --kubeconfig=bootstrap.kubeconfig
]# kubectl config use-context default --kubeconfig=bootstrap.kubeconfig

]# KUBE_APISERVER="https://172.16.100.30:6443"
]# kubectl config set-cluster kubernetes --certificate-authority=/data/kubernetes/ssl/ca.pem  --embed-certs=true --server=${KUBE_APISERVER} --kubeconfig=kube-proxy.kubeconfig
]# kubectl config set-credentials kube-proxy --client-certificate=/data/kubernetes/ssl/kube-proxy.pem --client-key=/data/kubernetes/ssl/kube-proxy-key.pem --embed-certs=true --kubeconfig=kube-proxy.kubeconfig
]# kubectl config set-context default --cluster=kubernetes --user="kube-proxy" --kubeconfig=kube-proxy.kubeconfig
]# kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
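Place both generated kubeconfig files where the configs below expect them:
]# cp bootstrap.kubeconfig kube-proxy.kubeconfig /data/kubernetes/cfg/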
]# cat > kube-proxy.conf << EOF
KUBE_PROXY_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/data/kubernetes/logs \
--config=/data/kubernetes/cfg/kube-proxy-config.yml"
EOF
]# cat > kube-proxy-config.yml << EOF
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
metricsBindAddress: 0.0.0.0:10249
clientConnection:
  kubeconfig: /data/kubernetes/cfg/kube-proxy.kubeconfig
hostnameOverride: k8s-node1
clusterCIDR: 10.244.0.0/16   # pod CIDR; should match the controller-manager --cluster-cidr
EOF
]# cat > /usr/lib/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=/data/kubernetes/cfg/kube-proxy.conf
ExecStart=/data/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
]# systemctl daemon-reload && systemctl enable kube-proxy && systemctl start kube-proxy

]# cat > kubelet.conf << EOF
KUBELET_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/data/kubernetes/logs \
--hostname-override=k8s-node1 \
--network-plugin=cni \
--kubeconfig=/data/kubernetes/cfg/kubelet.kubeconfig \
--bootstrap-kubeconfig=/data/kubernetes/cfg/bootstrap.kubeconfig \
--config=/data/kubernetes/cfg/kubelet-config.yml \
--cert-dir=/data/kubernetes/ssl \
--pod-infra-container-image=lizhenliang/pause-amd64:3.0"
EOF
]# cat > kubelet-config.yml << EOF
kind: KubeletConfiguration 
apiVersion: kubelet.config.k8s.io/v1beta1
address: 0.0.0.0 
port: 10250 
readOnlyPort: 10255 
cgroupDriver: cgroupfs 
clusterDNS:
- 10.0.0.2 
clusterDomain: cluster.local
failSwapOn: false 
authentication:
  anonymous:
    enabled: false 
  webhook: 
    cacheTTL: 2m0s 
    enabled: true 
  x509: 
    clientCAFile: /data/kubernetes/ssl/ca.pem 
authorization: 
  mode: Webhook 
  webhook: 
    cacheAuthorizedTTL: 5m0s 
    cacheUnauthorizedTTL: 30s 
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
maxOpenFiles: 1000000 
maxPods: 110
EOF
]# systemctl daemon-reload && systemctl enable kubelet && systemctl start kubelet

## Run the following on the master
]# kubectl get csr   # view pending kubelet certificate requests
]# kubectl certificate approve <csrNAME>
]# kubectl get node   # check node status
Note: repeat the same steps for each additional worker node.
