Introduction

This guide uses two nodes, one master and one worker, to demonstrate a binary installation; installing a larger cluster follows the same approach.

Network Setup

Skip this section if you are not running on virtual machines.
Following the environment preparation guide (Part 1) of this series, set up a network such as 10.3.0.0/16 and add both nodes to it.
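
As a quick sanity check, confirm that the two nodes can reach each other (10.3.0.4 and 10.3.0.5 are the example master and worker addresses used throughout this article; substitute your own):

#run from the master
ping -c 3 10.3.0.5
#run from the worker
ping -c 3 10.3.0.4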

Basic Environment Setup

Run the following script:

#A minimal install does not include yum-utils; sshpass is used to supply passwords on the command line
yum -y install yum-utils sshpass
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum makecache
#Install docker
yum -y install docker-ce
#Configure a docker registry mirror
echo '{"registry-mirrors": ["http://hub-mirror.c.163.com"]}'>/etc/docker/daemon.json
#Start docker so the mirror setting takes effect, and enable it on boot
systemctl start docker && systemctl enable docker
#Disable swap, otherwise kubelet will not work properly
sed -i '/ swap / s/^/#/' /etc/fstab
swapoff -a
#Stop the firewall to simplify port access between the nodes
systemctl stop firewalld && systemctl disable firewalld

#kubelet does not yet support SELinux
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config

#Load the IPVS kernel modules
cat << EOF > /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
 modprobe -- ip_vs
 modprobe -- ip_vs_rr
 modprobe -- ip_vs_wrr
 modprobe -- ip_vs_sh
 modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
#Create directories for k8s binaries, configuration, certificates, data and addons
mkdir /k8s/{bin,cfg,ssl,data,addons} -p
#Add /k8s/bin to the PATH environment variable
cat>>/etc/profile<<'EOF'
PATH=/k8s/bin:$PATH
EOF
source /etc/profile

Download the Required Software

#Certificate tooling (cfssl)

wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo
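#Optional check (not part of the original script): confirm the cfssl tools are on the PATH and runnable
cfssl version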
#Kubernetes components
wget https://github.com/etcd-io/etcd/releases/download/v3.3.11/etcd-v3.3.11-linux-amd64.tar.gz
wget https://github.com/coreos/flannel/releases/download/v0.11.0/flannel-v0.11.0-linux-amd64.tar.gz
wget https://dl.k8s.io/v1.15.0/kubernetes-server-linux-amd64.tar.gz

Generate Certificates

Generate the CA certificate

cd /k8s/ssl
#cfssl print-defaults config > ca-config.json
cat << EOF >ca-config.json
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "k8s": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF
#ca csr
#cfssl print-defaults csr > ca-csr.json
cat << EOF >ca-csr.json
{
    "CN": "ca",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Chengdu",
            "ST": "Sichuan"
        }
    ]
}
EOF
cfssl gencert -initca ca-csr.json | cfssljson -bare ca 
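
If the CA was generated successfully, ca.pem, ca-key.pem and ca.csr now exist in /k8s/ssl; as an optional check, cfssl-certinfo (installed earlier) can dump the certificate:

ls ca*.pem ca.csr
cfssl-certinfo -cert ca.pem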

Generate the server certificate

cat << EOF >server-csr.json
{
    "CN": "server",
    "hosts": [
    "10.3.0.4",
    "10.254.0.1"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Chengdu",
            "ST": "Sichuan"
        }
    ]
}
EOF
cfssl gencert -config=ca-config.json -profile=k8s  -ca=ca.pem -ca-key=ca-key.pem server-csr.json | cfssljson -bare server

Generate the kube-proxy certificate

#The built-in system:node-proxier role is bound to the user system:kube-proxy
cat << EOF >kube-proxy-csr.json
{
    "CN": "system:kube-proxy",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Chengdu",
            "ST": "Sichuan"
        }
    ]
}
EOF
cfssl gencert -config=ca-config.json -profile=k8s  -ca=ca.pem -ca-key=ca-key.pem kube-proxy-csr.json | cfssljson -bare kube-proxy

Generate the kubectl (cluster administrator) certificate

cat << EOF >admin-csr.json
{
    "CN": "system:admin",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Chengdu",
            "ST": "Sichuan"
        }
    ]
}
EOF
cfssl gencert -config=ca-config.json -profile=k8s  -ca=ca.pem -ca-key=ca-key.pem admin-csr.json | cfssljson -bare admin

Install etcd

etcd_ip=10.3.0.4
cd /root
tar -zxf etcd-v3.3.11-linux-amd64.tar.gz
cd etcd-v3.3.11-linux-amd64
mkdir -p /k8s/data/etcd
\cp etcd etcdctl /k8s/bin
#Create the configuration file
cat <<EOF >/k8s/cfg/etcd.conf 
ETCD_NAME="etcd01"
ETCD_DATA_DIR="/k8s/data/etcd"
ETCD_LISTEN_PEER_URLS="https://$etcd_ip:2380"
ETCD_LISTEN_CLIENT_URLS="https://$etcd_ip:2379"
 
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://$etcd_ip:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://$etcd_ip:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://$etcd_ip:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"

#[Security]
ETCD_CERT_FILE="/k8s/ssl/server.pem"
ETCD_KEY_FILE="/k8s/ssl/server-key.pem"
ETCD_TRUSTED_CA_FILE="/k8s/ssl/ca.pem"
ETCD_CLIENT_CERT_AUTH="true"
ETCD_PEER_CERT_FILE="/k8s/ssl/server.pem"
ETCD_PEER_KEY_FILE="/k8s/ssl/server-key.pem"
ETCD_PEER_TRUSTED_CA_FILE="/k8s/ssl/ca.pem"
ETCD_PEER_CLIENT_CERT_AUTH="true"
EOF

#Create the systemd unit file
cat  >/usr/lib/systemd/system/etcd.service <<'EOF'
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
WorkingDirectory=/k8s/data/etcd
EnvironmentFile=/k8s/cfg/etcd.conf
# set GOMAXPROCS to number of processors
ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /k8s/bin/etcd --name=\"${ETCD_NAME}\" --data-dir=\"${ETCD_DATA_DIR}\" --listen-client-urls=\"${ETCD_LISTEN_CLIENT_URLS}\" --listen-peer-urls=\"${ETCD_LISTEN_PEER_URLS}\" --advertise-client-urls=\"${ETCD_ADVERTISE_CLIENT_URLS}\" --initial-cluster-token=\"${ETCD_INITIAL_CLUSTER_TOKEN}\" --initial-cluster=\"${ETCD_INITIAL_CLUSTER}\" --initial-cluster-state=\"${ETCD_INITIAL_CLUSTER_STATE}\" --cert-file=\"${ETCD_CERT_FILE}\" --key-file=\"${ETCD_KEY_FILE}\" --trusted-ca-file=\"${ETCD_TRUSTED_CA_FILE}\" --client-cert-auth=\"${ETCD_CLIENT_CERT_AUTH}\" --peer-cert-file=\"${ETCD_PEER_CERT_FILE}\" --peer-key-file=\"${ETCD_PEER_KEY_FILE}\" --peer-trusted-ca-file=\"${ETCD_PEER_TRUSTED_CA_FILE}\" --peer-client-cert-auth=\"${ETCD_PEER_CLIENT_CERT_AUTH}\""
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
#Enable and start etcd
systemctl daemon-reload
systemctl enable etcd
systemctl start etcd
#Check that etcd came up healthy
etcdctl --ca-file=/k8s/ssl/ca.pem --cert-file=/k8s/ssl/server.pem --key-file=/k8s/ssl/server-key.pem --endpoints="https://$etcd_ip:2379" cluster-health

Install apiserver

etcd_ip=10.3.0.4
api_server_ip=10.3.0.4
cidr=10.254.0.0/16
cd /root
tar -zxf kubernetes-server-linux-amd64.tar.gz 
cd kubernetes/server/bin/
\cp kube-scheduler kube-apiserver kube-controller-manager kubectl /k8s/bin
#Generate a bootstrap token
echo `head -c 16 /dev/urandom | od -An -t x | tr -d ' '`',kubelet-bootstrap,10001,"system:bootstrappers"'>/k8s/cfg/token.csv
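#The file written above has the format <token>,<user>,<uid>,"<group>", i.e.
#  <32-hex-chars>,kubelet-bootstrap,10001,"system:bootstrappers"
#Note the token value: the BOOTSTRAP_TOKEN used later when generating the worker's bootstrap.kubeconfig must be this same value.
cat /k8s/cfg/token.csv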

#Create the configuration file (the insecure port stays on 127.0.0.1:8080 because kube-scheduler, kube-controller-manager and kubectl below connect through it)
cat> /k8s/cfg/kube-apiserver <<EOF
KUBE_APISERVER_OPTS="--advertise-address=$api_server_ip \
--allow-privileged=true \
--authorization-mode=RBAC,Node \
--client-ca-file=/k8s/ssl/ca.pem \
--enable-admission-plugins=NodeRestriction \
--enable-bootstrap-token-auth \
--etcd-cafile=/k8s/ssl/ca.pem \
--etcd-certfile=/k8s/ssl/server.pem \
--etcd-keyfile=/k8s/ssl/server-key.pem \
--etcd-servers=https://$etcd_ip:2379 \
--insecure-bind-address=127.0.0.1 \
--insecure-port=8080 \
--kubelet-client-certificate=/k8s/ssl/admin.pem \
--kubelet-client-key=/k8s/ssl/admin-key.pem \
--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
--proxy-client-cert-file=/k8s/ssl/admin.pem \
--proxy-client-key-file=/k8s/ssl/admin-key.pem \
--requestheader-allowed-names=front-proxy-client \
--requestheader-client-ca-file=/k8s/ssl/ca.pem \
--requestheader-extra-headers-prefix=X-Remote-Extra- \
--requestheader-group-headers=X-Remote-Group \
--requestheader-username-headers=X-Remote-User \
--secure-port=6443 \
--service-account-key-file=/k8s/ssl/ca-key.pem \
--service-cluster-ip-range=$cidr \
--tls-cert-file=/k8s/ssl/server.pem  \
--tls-private-key-file=/k8s/ssl/server-key.pem \
--token-auth-file=/k8s/cfg/token.csv"
EOF
#Create the systemd unit file
cat>/usr/lib/systemd/system/kube-apiserver.service <<'EOF'
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
 
[Service]
EnvironmentFile=-/k8s/cfg/kube-apiserver
ExecStart=/k8s/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure
 
[Install]
WantedBy=multi-user.target
EOF
#Enable and start the service
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl start kube-apiserver
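
Before moving on, it is worth checking that the API server answers. This is an optional check, assuming the insecure port is kept on 127.0.0.1:8080 as in the configuration above (kubectl with no kubeconfig also defaults to that address):

curl -s http://127.0.0.1:8080/healthz; echo
kubectl cluster-info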

Install scheduler

#Create the configuration file
cat>/k8s/cfg/kube-scheduler<<EOF
KUBE_SCHEDULER_OPTS="--logtostderr=true --v=4 --master=127.0.0.1:8080 --leader-elect=true"
EOF
#Create the systemd unit file
cat>/usr/lib/systemd/system/kube-scheduler.service<<'EOF'
 
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
 
[Service]
EnvironmentFile=-/k8s/cfg/kube-scheduler
ExecStart=/k8s/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure
 
[Install]
WantedBy=multi-user.target
EOF
#Enable and start the service
systemctl daemon-reload
systemctl enable kube-scheduler.service 
systemctl start kube-scheduler.service

Install controller manager

cidr=10.254.0.0/16
#Create the configuration file
cat>/k8s/cfg/kube-controller-manager<<EOF
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \
--v=4 \
--master=127.0.0.1:8080 \
--leader-elect=true \
--address=127.0.0.1 \
--service-cluster-ip-range=$cidr \
--cluster-name=kubernetes \
--cluster-signing-cert-file=/k8s/ssl/ca.pem \
--cluster-signing-key-file=/k8s/ssl/ca-key.pem  \
--root-ca-file=/k8s/ssl/ca.pem \
--service-account-private-key-file=/k8s/ssl/ca-key.pem"
EOF
#Create the systemd unit file
cat>/usr/lib/systemd/system/kube-controller-manager.service<<'EOF'
 
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
 
[Service]
EnvironmentFile=-/k8s/cfg/kube-controller-manager
ExecStart=/k8s/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
 
[Install]
WantedBy=multi-user.target
EOF
#Enable and start the service
systemctl daemon-reload
systemctl enable kube-controller-manager.service 
systemctl start kube-controller-manager.service
# Verify the control plane installation
kubectl get cs,nodes
# This binding must be created before kubelet is started on the worker
kubectl create clusterrolebinding create-csrs-for-bootstrapping --clusterrole=system:node-bootstrapper --group=system:bootstrappers
# Bind the admin user (the kubectl certificate generated earlier) to cluster-admin
kubectl create clusterrolebinding admin --clusterrole=cluster-admin --user=system:admin

Worker Node Preparation

First run the basic environment setup from the beginning of this article on the worker (omitted here).
Then copy the certificates and executable files from the master node:

worker_ip=10.3.0.5
pass=1
sshpass -p$pass ssh-copy-id root@$worker_ip -o StrictHostKeyChecking=no
cd /root/kubernetes/server/bin
scp /k8s/ssl/*.pem root@$worker_ip:/k8s/ssl
scp kube-proxy root@$worker_ip:/k8s/bin
scp kubelet root@$worker_ip:/k8s/bin
scp kubectl root@$worker_ip:/k8s/bin
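
As an optional check that the copy succeeded, list the target directories on the worker over ssh:

ssh root@$worker_ip "ls /k8s/ssl /k8s/bin"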

Generate the kubeconfig files (kubelet bootstrap, kube-proxy and kubectl admin)

KUBE_APISERVER="https://10.3.0.4:6443"
#This must be the same token that was written to /k8s/cfg/token.csv on the master
BOOTSTRAP_TOKEN=08c6fc5b6e7ba0fc4e44a6520139248d
cat>/k8s/cfg/environment.sh<<EOF
#!/bin/bash
#Create the kubelet bootstrapping kubeconfig
#Set the cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=/k8s/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=/k8s/cfg/bootstrap.kubeconfig
 
#Set the client authentication parameters
kubectl config set-credentials kubelet-bootstrap \
  --token=${BOOTSTRAP_TOKEN} \
  --kubeconfig=/k8s/cfg/bootstrap.kubeconfig
  
# Set the context parameters
kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=/k8s/cfg/bootstrap.kubeconfig
kubectl config use-context default --kubeconfig=/k8s/cfg/bootstrap.kubeconfig

#----------------------
 
# Create the kube-proxy kubeconfig file
 
kubectl config set-cluster kubernetes --certificate-authority=/k8s/ssl/ca.pem --embed-certs=true --server=${KUBE_APISERVER} --kubeconfig=/k8s/cfg/kube-proxy.kubeconfig
 
kubectl config set-credentials kube-proxy --client-certificate=/k8s/ssl/kube-proxy.pem --client-key=/k8s/ssl/kube-proxy-key.pem --embed-certs=true --kubeconfig=/k8s/cfg/kube-proxy.kubeconfig
 
kubectl config set-context default --cluster=kubernetes --user=kube-proxy --kubeconfig=/k8s/cfg/kube-proxy.kubeconfig
 
# Set the default context
kubectl config use-context default --kubeconfig=/k8s/cfg/kube-proxy.kubeconfig

#----------------------
 
# Create the kubectl (admin) kubeconfig file; make sure the target directory exists
mkdir -p /etc/kubernetes
 
kubectl config set-cluster kubernetes --certificate-authority=/k8s/ssl/ca.pem --embed-certs=true --server=${KUBE_APISERVER} --kubeconfig=/etc/kubernetes/admin.conf
 
kubectl config set-credentials admin --client-certificate=/k8s/ssl/admin.pem --client-key=/k8s/ssl/admin-key.pem --embed-certs=true --kubeconfig=/etc/kubernetes/admin.conf
 
kubectl config set-context default --cluster=kubernetes --user=admin --kubeconfig=/etc/kubernetes/admin.conf
 
# Set the default context
kubectl config use-context default --kubeconfig=/etc/kubernetes/admin.conf
EOF
sh /k8s/cfg/environment.sh 
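
To confirm the kubeconfig files were written as expected (optional), kubectl can print them back with the embedded certificate data redacted:

kubectl config view --kubeconfig=/k8s/cfg/bootstrap.kubeconfig
kubectl config view --kubeconfig=/k8s/cfg/kube-proxy.kubeconfig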

Install kubelet

#Create the kubelet parameter configuration file
worker_ip=10.3.0.5
cat>/k8s/cfg/kubelet.config<<EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: $worker_ip
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS: ["10.254.0.10"]
clusterDomain: cluster.local.
failSwapOn: true
authentication:
  anonymous:
    enabled: true
EOF
#Create the options file
cat>/k8s/cfg/kubelet<<EOF
KUBELET_OPTS="--logtostderr=true \
--v=4 \
--hostname-override=$worker_ip \
--kubeconfig=/k8s/cfg/kubelet.kubeconfig \
--bootstrap-kubeconfig=/k8s/cfg/bootstrap.kubeconfig \
--config=/k8s/cfg/kubelet.config \
--cert-dir=/k8s/ssl \
--client-ca-file=/k8s/ssl/ca.pem \
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
EOF
#Create the systemd unit file
cat>/usr/lib/systemd/system/kubelet.service<<'EOF'
 
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service
 
[Service]
EnvironmentFile=/k8s/cfg/kubelet
ExecStart=/k8s/bin/kubelet $KUBELET_OPTS
Restart=on-failure
KillMode=process
 
[Install]
WantedBy=multi-user.target
EOF
#Enable and start kubelet
systemctl daemon-reload
systemctl enable kubelet
systemctl start kubelet
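
If kubelet does not come up, its journal is the first place to look (optional troubleshooting step, not part of the original flow):

systemctl status kubelet --no-pager
journalctl -u kubelet --no-pager | tail -n 20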

Approve the certificate on the master node

kubectl certificate approve `kubectl get csr|sed '1d'|awk '{print $1}'`
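
Once the CSR is approved, the worker should register itself shortly afterwards; this can be confirmed on the master:

kubectl get csr
kubectl get nodes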

Install kube-proxy

#Create the configuration file
worker_ip=10.3.0.5
cidr=10.254.0.0/16
cat>/k8s/cfg/kube-proxy<<EOF
KUBE_PROXY_OPTS="--logtostderr=true \
--v=4 \
--hostname-override=$worker_ip \
--cluster-cidr=$cidr \
--kubeconfig=/k8s/cfg/kube-proxy.kubeconfig"
EOF
#Create the systemd unit file
cat>/usr/lib/systemd/system/kube-proxy.service<<'EOF'
 
[Unit]
Description=Kubernetes Proxy
After=network.target
 
[Service]
EnvironmentFile=-/k8s/cfg/kube-proxy
ExecStart=/k8s/bin/kube-proxy $KUBE_PROXY_OPTS
Restart=on-failure
 
[Install]
WantedBy=multi-user.target
EOF
#Enable and start the service
systemctl daemon-reload
systemctl enable kube-proxy
systemctl start kube-proxy
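
Since no --proxy-mode is set, kube-proxy runs in its default iptables mode here, so a rough optional check is that the KUBE-SERVICES chains have appeared:

systemctl status kube-proxy --no-pager
iptables-save | grep KUBE-SERVICES | head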

Install flanneld

Run the following on the master node:

#Register the pod network range in etcd for flannel
etcd_ip=10.3.0.4
cidr=10.254.0.0/16
etcdctl --ca-file=/k8s/ssl/ca.pem --cert-file=/k8s/ssl/server.pem --key-file=/k8s/ssl/server-key.pem --endpoints="https://$etcd_ip:2379"  set /k8s/network/config  '{ "Network": "'$cidr'", "Backend": {"Type": "vxlan"}}'
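
The value can be read back to confirm it was stored (optional):

etcdctl --ca-file=/k8s/ssl/ca.pem --cert-file=/k8s/ssl/server.pem --key-file=/k8s/ssl/server-key.pem --endpoints="https://$etcd_ip:2379" get /k8s/network/config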

Run the following on the worker node:

etcd_ip=10.3.0.4
#Install the binaries
cd /root
tar xzf flannel-v0.11.0-linux-amd64.tar.gz
mv -f flanneld mk-docker-opts.sh /k8s/bin
#Create the flanneld configuration file
cat>/k8s/cfg/flanneld<<EOF
FLANNEL_OPTIONS="--etcd-endpoints=https://$etcd_ip:2379 -etcd-cafile=/k8s/ssl/ca.pem -etcd-certfile=/k8s/ssl/server.pem -etcd-keyfile=/k8s/ssl/server-key.pem -etcd-prefix=/k8s/network"
EOF
#Create the systemd unit file
cat>/usr/lib/systemd/system/flanneld.service<<'EOF'
[Unit]
Description=Flanneld overlay address etcd agent
After=network-online.target network.target
Before=docker.service
 
[Service]
Type=notify
EnvironmentFile=/k8s/cfg/flanneld
ExecStart=/k8s/bin/flanneld --ip-masq $FLANNEL_OPTIONS
ExecStartPost=/k8s/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF
#Have docker read the flannel-generated environment file before dockerd starts
sed -i s#^ExecStart=.*\$#EnvironmentFile=/run/flannel/subnet.env\\nExecStart=/usr/bin/dockerd\ \$DOCKER_NETWORK_OPTIONS# /usr/lib/systemd/system/docker.service 

#Enable at boot; flanneld must start before docker
systemctl daemon-reload
systemctl stop docker
systemctl start flanneld
systemctl enable flanneld
systemctl start docker
systemctl restart kubelet
systemctl restart kube-proxy
#Check that the docker0 bridge and the flannel interface are in the same subnet
ip a
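
The docker unit now reads its network options from /run/flannel/subnet.env (generated by the flanneld unit configured above), so a more targeted check than scanning the full ip a output is:

cat /run/flannel/subnet.env
ip -4 addr show flannel.1
ip -4 addr show docker0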

Install coredns

cd /root/kubernetes
tar xzf kubernetes-src.tar.gz
cd cluster/addons/dns/coredns
cat>transforms2sed.sed<<EOF
s/__PILLAR__DNS__SERVER__/10.254.0.10/g
s/__PILLAR__DNS__DOMAIN__/cluster.local/g
s|__PILLAR__CLUSTER_CIDR__|10.254.0.0/16|g
s/__PILLAR__DNS__MEMORY__LIMIT__/200Mi/g
s/__MACHINE_GENERATED_WARNING__/Warning: This is a file generated from the base underscore template file: coredns.yaml.base/g
EOF
sed -f transforms2sed.sed coredns.yaml.base > /k8s/addons/coredns.yaml
#Read the matching coredns image version from the generated manifest
cd /k8s/addons
coredns_version=`egrep -o 'image:.*' coredns.yaml |awk -F: '{print $3}'`
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:$coredns_version
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:$coredns_version k8s.gcr.io/coredns:$coredns_version
kubectl apply -f coredns.yaml
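
After applying the manifest, the CoreDNS pods and their Service (named kube-dns in the addon manifest) should appear in kube-system; an optional check:

kubectl -n kube-system get pods -l k8s-app=kube-dns -o wide
kubectl -n kube-system get svc kube-dns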

Install the latest version of coredns (alternative)

cd /k8s/addons
wget https://github.com/coredns/deployment/archive/master.zip
unzip master.zip
cd deployment-master/kubernetes
./deploy.sh -s -r 10.254.0.0/16 -i 10.254.0.10 -d cluster.local > /k8s/addons/coredns.yaml
cd /k8s/addons
coredns_version=`egrep -o 'image:.*' coredns.yaml |awk -F: '{print $3}'`
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:$coredns_version
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:$coredns_version k8s.gcr.io/coredns:$coredns_version
kubectl apply -f coredns.yaml

Test DNS

cat > my-nginx.yaml <<EOF
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: my-nginx
spec:
  selector:
    matchLabels:
      app: my-nginx
  replicas: 2
  template:
    metadata:
      labels:
        app: my-nginx
    spec:
      containers:
      - name: my-nginx
        image: nginx:1.17.2
        ports:
        - containerPort: 80
EOF
kubectl apply -f my-nginx.yaml
kubectl expose deploy my-nginx
docker pull infoblox/dnstools
kubectl run -it --rm --restart=Never --image=infoblox/dnstools dnstools
nslookup my-nginx
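#(optional, not in the original) the fully qualified names exercise the clusterDomain (cluster.local) configured for kubelet
nslookup my-nginx.default.svc.cluster.local
nslookup kubernetes.default.svc.cluster.local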

Pitfalls

  1. Unable to connect to the server: x509: certificate has expired or is not yet valid (caused by a wrong system time); sync the clock:
ntpdate cn.pool.ntp.org
  2. The server certificate must include the IPs of every master node, plus the first service IP 10.254.0.1 (see the check below).
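
A quick way to see which addresses a certificate actually contains is to inspect its Subject Alternative Names, for example with the cfssl-certinfo tool installed earlier or with openssl:

cfssl-certinfo -cert /k8s/ssl/server.pem
openssl x509 -in /k8s/ssl/server.pem -noout -text | grep -A1 'Subject Alternative Name'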