Security settings on all nodes
Disable swap, the firewall, and SELinux on every node
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
systemctl stop firewalld
systemctl disable firewalld
sed -i 's/enforcing/disabled/' /etc/selinux/config
setenforce 0
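
Optional sanity check: swap should now report 0 and SELinux should be Permissive (Disabled after a reboot).
free -h | grep -i swap
getenforce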

etcd certificates on the master node
Create working directories on the master
mkdir k8s; cd k8s
mkdir etcd-cert k8s-cert
cd etcd-cert

Download the cfssl certificate tools
curl -L http://dl.jxit.net.cn/k8s/cfssl_linux-amd64 -o /usr/local/bin/cfssl
curl -L http://dl.jxit.net.cn/k8s/cfssljson_linux-amd64 -o /usr/local/bin/cfssljson
curl -L http://dl.jxit.net.cn/k8s/cfssl-certinfo_linux-amd64 -o /usr/local/bin/cfssl-certinfo
chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson /usr/local/bin/cfssl-certinfo
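
A quick way to confirm the tools are on the PATH before generating any certificates:
cfssl version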

Generate the etcd certificates: sh etcd-cert.sh (script contents below)
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "www": {
        "expiry": "87600h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}
EOF

cat > ca-csr.json <<EOF
{
  "CN": "etcd CA",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "ST": "Beijing"
    }
  ]
}
EOF

cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

#-----------------------

cat > server-csr.json <<EOF
{
  "CN": "etcd",
  "hosts": [
    "10.10.10.11",
    "10.10.10.12",
    "10.10.10.13"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing"
    }
  ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server
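
If everything worked, the directory now contains the CA and server key pairs; a simple check:
ls ca.pem ca-key.pem server.pem server-key.pem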

Download etcd
yum -y install wget
wget http://dl.jxit.net.cn/k8s/etcd-v3.3.15-linux-amd64.tar.gz
tar -zxvf etcd-v3.3.15-linux-amd64.tar.gz
cd etcd-v3.3.15-linux-amd64
mkdir -p /opt/etcd/{ssl,cfg,bin}
mv etcd etcdctl /opt/etcd/bin/
cp /root/k8s/etcd-cert/{ca,server-key,server}.pem /opt/etcd/ssl

Start etcd with the certificates generated above
sh ./etcd.sh etcd01 10.10.10.11
#!/bin/bash

# example: ./etcd.sh etcd01 192.168.1.10 etcd02=https://192.168.1.11:2380,etcd03=https://192.168.1.12:2380

ETCD_NAME=$1
ETCD_IP=$2
ETCD_CLUSTER=$3

WORK_DIR=/opt/etcd

cat <<EOF >$WORK_DIR/cfg/etcd
#[Member]
ETCD_NAME="${ETCD_NAME}"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://${ETCD_IP}:2380"
ETCD_LISTEN_CLIENT_URLS="https://${ETCD_IP}:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://${ETCD_IP}:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://${ETCD_IP}:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://${ETCD_IP}:2380,${ETCD_CLUSTER}"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

cat <<EOF >/usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=${WORK_DIR}/cfg/etcd
ExecStart=${WORK_DIR}/bin/etcd \\
--name=\${ETCD_NAME} \\
--data-dir=\${ETCD_DATA_DIR} \\
--listen-peer-urls=\${ETCD_LISTEN_PEER_URLS} \\
--listen-client-urls=\${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \\
--advertise-client-urls=\${ETCD_ADVERTISE_CLIENT_URLS} \\
--initial-advertise-peer-urls=\${ETCD_INITIAL_ADVERTISE_PEER_URLS} \\
--initial-cluster=\${ETCD_INITIAL_CLUSTER} \\
--initial-cluster-token=\${ETCD_INITIAL_CLUSTER_TOKEN} \\
--initial-cluster-state=new \\
--cert-file=${WORK_DIR}/ssl/server.pem \\
--key-file=${WORK_DIR}/ssl/server-key.pem \\
--peer-cert-file=${WORK_DIR}/ssl/server.pem \\
--peer-key-file=${WORK_DIR}/ssl/server-key.pem \\
--trusted-ca-file=${WORK_DIR}/ssl/ca.pem \\
--peer-trusted-ca-file=${WORK_DIR}/ssl/ca.pem
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable etcd
systemctl restart etcd

Check the etcd cluster
/opt/etcd/bin/etcdctl \
--ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem \
--endpoints="https://10.10.10.11:2379" cluster-health

Overlay network for the worker nodes
Store the overlay network configuration in etcd (run on the master)
/opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://10.10.10.11:2379" set /coreos.com/network/config '{ "Network": "10.0.0.0/16", "Backend": {"Type": "vxlan"}}'

Install the flannel overlay network component on the worker nodes
yum -y install wget
wget http://dl.jxit.net.cn/k8s/flannel-v0.11.0-linux-amd64.tar.gz
tar -zxvf flannel-v0.11.0-linux-amd64.tar.gz
mkdir /opt/kubernetes/{bin,cfg,ssl} -p
mv flanneld mk-docker-opts.sh /opt/kubernetes/bin/

Copy the etcd client certificates over from the master node
scp 10.10.10.11:/opt/etcd/ssl/* /opt/kubernetes/ssl

Have flannel build the overlay network from the configuration stored in etcd
sh ./flannel.sh https://10.10.10.11:2379
#!/bin/bash

ETCD_ENDPOINTS=${1:-"http://127.0.0.1:2379"}

cat <<EOF >/opt/kubernetes/cfg/flanneld

FLANNEL_OPTIONS="--etcd-endpoints=${ETCD_ENDPOINTS} \\
-etcd-cafile=/opt/kubernetes/ssl/ca.pem \\
-etcd-certfile=/opt/kubernetes/ssl/server.pem \\
-etcd-keyfile=/opt/kubernetes/ssl/server-key.pem"

EOF

cat <<EOF >/usr/lib/systemd/system/flanneld.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network-online.target network.target
Before=docker.service

[Service]
Type=notify
EnvironmentFile=/opt/kubernetes/cfg/flanneld
ExecStart=/opt/kubernetes/bin/flanneld --ip-masq $FLANNEL_OPTIONS
ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure

[Install]
WantedBy=multi-user.target

EOF

systemctl daemon-reload
systemctl enable flanneld
systemctl restart flanneld
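
Optional check: flanneld should have written its allocated subnet to /run/flannel/subnet.env, and with the vxlan backend a flannel.1 interface should exist with an address inside 10.0.0.0/16.
cat /run/flannel/subnet.env
ip addr show flannel.1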
Connect Docker on the worker nodes to the overlay network
Install Docker on all worker nodes
sudo yum install -y wget yum-utils device-mapper-persistent-data lvm2
sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sudo yum makecache fast
sudo yum -y install docker-ce
curl -sSL http://dl.jxit.net.cn/k8s/set_mirror.sh | sh -s http://hub-mirror.c.163.com
sudo systemctl enable docker

Make containers created by Docker communicate over the overlay network
cat <<EOF >/usr/lib/systemd/system/docker.service

[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=/run/flannel/subnet.env
ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS
ExecReload=/bin/kill -s HUP $MAINPID
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
Delegate=yes
KillMode=process
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s

[Install]
WantedBy=multi-user.target

EOF

systemctl daemon-reload
systemctl restart docker
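
Optional check: docker0 should now sit inside the flannel subnet from /run/flannel/subnet.env, so containers on different nodes can reach each other.
ip addr show docker0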

Inspect the network configuration
/opt/etcd/bin/etcdctl \
--ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem \
--endpoints="https://10.10.10.11:2379" ls /coreos.com/network/subnets

API server on the master node
Install the Kubernetes master components
wget http://dl.jxit.net.cn/k8s/kubernetes-server-linux-amd64.tar.gz
tar -zxvf kubernetes-server-linux-amd64.tar.gz
mkdir /opt/kubernetes/{bin,cfg,ssl} -p
cd /root/kubernetes/server/bin/
cp kube-controller-manager kube-apiserver kube-scheduler /opt/kubernetes/bin/
cp kubectl /usr/bin/

Generate the certificates used to access the API server
cd /opt/kubernetes/ssl; sh k8s-cert.sh
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "expiry": "87600h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}
EOF
cat > ca-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "ST": "Beijing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
#-----------------------
cat > server-csr.json <<EOF
{
  "CN": "kubernetes",
  "hosts": [
    "10.0.0.1",
    "127.0.0.1",
    "10.10.10.11",
    "10.10.10.12",
    "10.10.10.13",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server

#-----------------------
cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin

#-----------------------
cat > kube-proxy-csr.json <<EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

Generate the API server bootstrap token
BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
echo $BOOTSTRAP_TOKEN
cat > token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
mv token.csv /opt/kubernetes/cfg/
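
Keep this token value handy; the kubeconfig.sh script further below must be edited to use it.
cat /opt/kubernetes/cfg/token.csv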

Install and start the API server as a service with the configuration above
sh apiserver.sh 10.10.10.11 https://10.10.10.11:2379
#!/bin/bash

MASTER_ADDRESS=$1
ETCD_SERVERS=$2

cat <<EOF >/opt/kubernetes/cfg/kube-apiserver

KUBE_APISERVER_OPTS="--logtostderr=true \\
--v=4 \\
--etcd-servers=${ETCD_SERVERS} \\
--bind-address=${MASTER_ADDRESS} \\
--secure-port=6443 \\
--advertise-address=${MASTER_ADDRESS} \\
--allow-privileged=true \\
--service-cluster-ip-range=10.0.0.0/24 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--kubelet-https=true \\
--enable-bootstrap-token-auth \\
--token-auth-file=/opt/kubernetes/cfg/token.csv \\
--service-node-port-range=30000-50000 \\
--tls-cert-file=/opt/kubernetes/ssl/server.pem \\
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \\
--client-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--etcd-cafile=/opt/etcd/ssl/ca.pem \\
--etcd-certfile=/opt/etcd/ssl/server.pem \\
--etcd-keyfile=/opt/etcd/ssl/server-key.pem"

EOF

cat <<EOF >/usr/lib/systemd/system/kube-apiserver.service

[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-apiserver
ExecStart=/opt/kubernetes/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target

EOF

systemctl daemon-reload
systemctl enable kube-apiserver
systemctl restart kube-apiserver
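
Optional check that the API server came up and is listening on the secure port:
systemctl status kube-apiserver
ss -tlnp | grep 6443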
controller-manager on the master node
Install and configure kube-controller-manager, which keeps cluster resources in their desired state
sh controller-manager.sh 127.0.0.1
#!/bin/bash

MASTER_ADDRESS=$1

cat <<EOF >/opt/kubernetes/cfg/kube-controller-manager

KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \\
--v=4 \\
--master=${MASTER_ADDRESS}:8080 \\
--leader-elect=true \\
--address=127.0.0.1 \\
--service-cluster-ip-range=10.0.0.0/24 \\
--cluster-name=kubernetes \\
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--root-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--experimental-cluster-signing-duration=87600h0m0s"

EOF

cat <<EOF >/usr/lib/systemd/system/kube-controller-manager.service

[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-controller-manager
ExecStart=/opt/kubernetes/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target

EOF

systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl restart kube-controller-manager
scheduler on the master node
Install and configure the scheduler: sh scheduler.sh 127.0.0.1
#!/bin/bash

MASTER_ADDRESS=$1

cat <<EOF >/opt/kubernetes/cfg/kube-scheduler

KUBE_SCHEDULER_OPTS="--logtostderr=true \\
--v=4 \\
--master=${MASTER_ADDRESS}:8080 \\
--leader-elect"

EOF

cat <<EOF >/usr/lib/systemd/system/kube-scheduler.service

[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-scheduler
ExecStart=/opt/kubernetes/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target

EOF

systemctl daemon-reload
systemctl enable kube-scheduler
systemctl restart kube-scheduler
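
With apiserver, controller-manager, and scheduler running, the master can be checked in one go; all components should report Healthy:
kubectl get cs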

Cluster authorization on the master node
Create the bootstrap role binding on the master
kubectl create clusterrolebinding kubelet-bootstrap \
--clusterrole=system:node-bootstrapper \
--user=kubelet-bootstrap

Generate the kubeconfig files the worker nodes use to access the master (replace the token)
sh kubeconfig.sh 10.10.10.11 /opt/kubernetes/ssl/
APISERVER=$1
SSL_DIR=$2
# Change this to the token value from token.csv above!
BOOTSTRAP_TOKEN=26b34be575911adb969f820daa29feb4

# Create kubelet bootstrapping kubeconfig

export KUBE_APISERVER="https://$APISERVER:6443"

# Set cluster parameters

kubectl config set-cluster kubernetes \
  --certificate-authority=$SSL_DIR/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=bootstrap.kubeconfig

# Set client authentication parameters

kubectl config set-credentials kubelet-bootstrap \
  --token=${BOOTSTRAP_TOKEN} \
  --kubeconfig=bootstrap.kubeconfig

# Set context parameters

kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=bootstrap.kubeconfig

# Set default context

kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
#----------------------

# Create the kube-proxy kubeconfig file

kubectl config set-cluster kubernetes \
  --certificate-authority=$SSL_DIR/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-credentials kube-proxy \
  --client-certificate=$SSL_DIR/kube-proxy.pem \
  --client-key=$SSL_DIR/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
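
After running the script, bootstrap.kubeconfig and kube-proxy.kubeconfig should exist in the current directory (the scp commands below assume they ended up in /root):
ls bootstrap.kubeconfig kube-proxy.kubeconfig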

Copy the worker-node configuration files and binaries over to the workers
scp /root/bootstrap.kubeconfig /root/kube-proxy.kubeconfig 10.10.10.12:/opt/kubernetes/cfg/
scp /root/bootstrap.kubeconfig /root/kube-proxy.kubeconfig 10.10.10.13:/opt/kubernetes/cfg/
scp /root/kubernetes/server/bin/kubelet /root/kubernetes/server/bin/kube-proxy 10.10.10.12:/opt/kubernetes/bin/
scp /root/kubernetes/server/bin/kubelet /root/kubernetes/server/bin/kube-proxy 10.10.10.13:/opt/kubernetes/bin/

kubelet agent on the worker nodes
Configure the kubelet agent on both worker nodes (mind the indentation in the authentication: block)
sh kubelet.sh 10.10.10.12
#!/bin/bash

NODE_ADDRESS=$1
DNS_SERVER_IP=${2:-"10.0.0.2"}

cat <<EOF >/opt/kubernetes/cfg/kubelet

KUBELET_OPTS="--logtostderr=true \\
--v=4 \\
--hostname-override=${NODE_ADDRESS} \\
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\
--config=/opt/kubernetes/cfg/kubelet.config \\
--cert-dir=/opt/kubernetes/ssl \\
--pod-infra-container-image=docker.io/kubernetes/pause:latest"

EOF

cat <<EOF >/opt/kubernetes/cfg/kubelet.config

kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: ${NODE_ADDRESS}
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
- ${DNS_SERVER_IP}
clusterDomain: cluster.local.
failSwapOn: false
authentication:
  anonymous:
    enabled: true

EOF

cat <<EOF >/usr/lib/systemd/system/kubelet.service

[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kubelet
ExecStart=/opt/kubernetes/bin/kubelet $KUBELET_OPTS
Restart=on-failure
KillMode=process

[Install]
WantedBy=multi-user.target

EOF

systemctl daemon-reload
systemctl enable kubelet
systemctl restart kubelet
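
Optional check: the kubelet should be running, and using bootstrap.kubeconfig it sends a certificate signing request to the master (approved in a later step).
systemctl status kubelet
journalctl -u kubelet -n 20 --no-pager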

kube-proxy network proxy on the worker nodes
Configure the kube-proxy network proxy on both worker nodes: sh kube-proxy.sh 10.10.10.12
#!/bin/bash

NODE_ADDRESS=$1

cat <<EOF >/opt/kubernetes/cfg/kube-proxy

KUBE_PROXY_OPTS="--logtostderr=true \\
--v=4 \\
--hostname-override=${NODE_ADDRESS} \\
--cluster-cidr=10.0.0.0/24 \\
--proxy-mode=ipvs \\
--kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig"

EOF

cat <<EOF >/usr/lib/systemd/system/kube-proxy.service

[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy $KUBE_PROXY_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target

EOF

systemctl daemon-reload
systemctl enable kube-proxy
systemctl restart kube-proxy
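
Optional check: kube-proxy runs in ipvs mode here, so its rules can be inspected with ipvsadm (install it first with yum -y install ipvsadm if it is missing).
systemctl status kube-proxy
ipvsadm -Ln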

Authorization on the master node
Accept the worker nodes into the cluster from the master (remember to substitute the node ID)
kubectl get csr

Replace the node ID below with one from the output of the command above

kubectl certificate approve node-csr-NK3x

Check the worker node status

kubectl get node

Add an authorized user so you can exec into containers and view logs

kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=system:anonymous

Deploy CoreDNS so containers can reach each other by name, then check its status (this takes a while; wait until everything is Running)
kubectl apply -f http://dl.jxit.net.cn/k8s/coredns.yaml
kubectl get all -n kube-system

Check the worker node status and run a test deployment
kubectl create deployment nginx --image=evild/alpine-nginx
kubectl expose deployment nginx --port=80 --type=NodePort
kubectl create deployment apache --image=evild/alpine-nginx
kubectl expose deployment apache --port=80 --type=NodePort
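
To verify the test deployments, list the pods and services and hit the assigned NodePort from any node (replace <nodeport> with the port shown by kubectl):
kubectl get pods,svc
curl http://10.10.10.12:<nodeport>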
