Setting up a Kubernetes cluster

kubeadm

Initial setup
# 1. Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
# 2. Stop and disable the iptables service
[root@master ~]# systemctl stop iptables
[root@master ~]# systemctl disable iptables

# Disable SELinux
sed -i 's/enforcing/disabled/' /etc/selinux/config  # permanent
setenforce 0  # temporary

# Disable swap
swapoff -a  # temporary
sed -ri 's/.*swap.*/#&/' /etc/fstab    # permanent

# After disabling swap, be sure to reboot the machine!
# Set the hostname according to your plan
hostnamectl set-hostname <hostname>

# Add hosts entries on the master
cat >> /etc/hosts << EOF
172.26.22.200 master
172.26.22.201 slave
EOF


# Pass bridged IPv4 traffic to iptables chains
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF

sysctl --system  # apply
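
# Note: the two bridge settings above only exist once the br_netfilter kernel
# module is loaded; on minimal images it may not be loaded by default:
modprobe br_netfilter
lsmod | grep br_netfilter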


# Time synchronization
yum install ntpdate -y
ntpdate time.windows.com
Install software (master and all worker nodes)

Install Docker; mind the version

# 1. Switch to the Aliyun docker-ce mirror repo
[root@master ~]# wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo

# 2. List the Docker versions available in the repo
[root@master ~]# yum list docker-ce --showduplicates

# 3. Install a specific version of docker-ce
# --setopt=obsoletes=0 must be specified, otherwise yum installs a newer version
[root@master ~]# yum install --setopt=obsoletes=0 docker-ce-18.06.3.ce-3.el7 -y

# 4. Add a config file
# By default Docker uses cgroupfs as its cgroup driver, while Kubernetes recommends systemd instead
[root@master ~]# mkdir /etc/docker
[root@master ~]# cat <<EOF> /etc/docker/daemon.json
{
	"exec-opts": ["native.cgroupdriver=systemd"],
	"registry-mirrors": ["https://kn0t2bca.mirror.aliyuncs.com"]
}
EOF

# 5. Start Docker
[root@master ~]# systemctl restart docker
[root@master ~]# systemctl enable docker

Configure the Kubernetes yum repo

cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0

gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

Install kubeadm, kubelet, and kubectl

yum install -y kubelet-1.23.6 kubeadm-1.23.6 kubectl-1.23.6

systemctl enable kubelet
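
# Quick sanity check that the intended 1.23.6 versions were installed:
kubeadm version -o short
kubelet --version
kubectl version --client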

# Configure Docker's cgroup driver: edit /etc/docker/daemon.json and add the following
{"exec-opts": ["native.cgroupdriver=systemd"]}

# Restart Docker
systemctl daemon-reload
systemctl restart docker
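
# Verify that Docker now reports the systemd cgroup driver:
docker info | grep -i "cgroup driver"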
Initialize the Kubernetes master
# Run on the master node

kubeadm init \
      --apiserver-advertise-address=172.26.22.200 \
      --image-repository registry.aliyuncs.com/google_containers \
      --kubernetes-version v1.23.6 \
      --service-cidr=10.96.0.0/12 \
      --pod-network-cidr=10.244.0.0/16
      
      
"""
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 172.26.22.200:6443 --token to99q6.h0nbbv93hwovz6el \
	--discovery-token-ca-cert-hash sha256:3a63cffd591cbb9b0842cd39169fddae83099359664a09e09cc8aa91b671e121
"""

# After init succeeds, copy and run the following
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

kubectl get nodes
Join worker nodes to the cluster
Run on k8s-node1 and k8s-node2 respectively.

# Copy the join command printed on the master console after a successful init

kubeadm join 192.168.113.120:6443 --token w34ha2.66if2c8nwmeat9o7 --discovery-token-ca-cert-hash sha256:20e2227554f8883811c01edd850f0cf2f396589d32b57b9984de3353a7389477


# If you lost the init token, you can retrieve it or request a new one as follows
# If the token has expired, request a new one
kubeadm token create

# If the token has not expired, list it with
kubeadm token list

# Compute the --discovery-token-ca-cert-hash value; prepend sha256: to the result
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | \
openssl dgst -sha256 -hex | sed 's/^.* //'
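
# Alternatively, kubeadm can print a complete, ready-to-paste join command
# (token plus CA cert hash) in one step:
kubeadm token create --print-join-command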
Deploy the CNI network plugin
# Run on the master node
# Download the calico manifest (the download may time out)
# curl https://raw.githubusercontent.com/projectcalico/calico/v3.26.1/manifests/calico.yaml -O
curl -O https://docs.tigera.io/archive/v3.25/manifests/calico.yaml
# kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml


# Edit the CALICO_IPV4POOL_CIDR setting in calico.yaml to match the pod CIDR used at init:
# 10.244.0.0/16

# Set the NIC name under IP_AUTODETECTION_METHOD
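# For example (illustrative; assumes the node NIC is eth0, adjust to your environment),
# add this env entry to the calico-node container in calico.yaml:
#   - name: IP_AUTODETECTION_METHOD
#     value: "interface=eth0"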

# Strip the docker.io/ image prefix so slow pulls don't fail
sed -i 's#docker.io/##g' calico.yaml

# Deploy
kubectl apply -f calico.yaml
# Check component health
kubectl get cs
Test the Kubernetes cluster
# Create a deployment
kubectl create deployment nginx --image=nginx

# Expose the port
kubectl expose deployment nginx --port=80 --type=NodePort

# Check pod and service info
kubectl get pod,svc
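
# Smoke test: the NodePort printed above (the 3xxxx in 80:3xxxx/TCP) is served on
# every node IP. The port below is illustrative; substitute the one you were assigned:
curl http://172.26.22.200:30000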
Stop services and remove the old configuration
kubeadm reset -f
rm -rf ~/.kube/
rm -rf /etc/kubernetes/
rm -rf /etc/systemd/system/kubelet.service.d
rm -rf /etc/systemd/system/kubelet.service
rm -rf /usr/bin/kube*





Binary installation (single-master cluster)

Initial setup
# 1. Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
# 2. Stop and disable the iptables service
[root@master ~]# systemctl stop iptables
[root@master ~]# systemctl disable iptables

# Disable SELinux
sed -i 's/enforcing/disabled/' /etc/selinux/config  # permanent
setenforce 0  # temporary

# Disable swap
swapoff -a  # temporary
sed -ri 's/.*swap.*/#&/' /etc/fstab    # permanent

# After disabling swap, be sure to reboot the machine!
# Set the hostname according to your plan
hostnamectl set-hostname <hostname>

# Add hosts entries on the master
cat >> /etc/hosts << EOF
172.26.22.200 master
172.26.22.201 slave
EOF


# Pass bridged IPv4 traffic to iptables chains
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF

sysctl --system  # apply


# Time synchronization
yum install ntpdate -y
ntpdate time.windows.com
Deploy the etcd cluster

etcd is a distributed key-value store that Kubernetes uses for all of its data, so an etcd database must be prepared first. To avoid a single point of failure, deploy it as a cluster: a 3-member cluster tolerates 1 machine failure, and a 5-member cluster tolerates 2. In general, an n-member cluster needs a quorum of floor(n/2)+1 members and therefore tolerates floor((n-1)/2) failures.

Note: to save machines, etcd is co-located with the Kubernetes nodes here. It can also be deployed outside the Kubernetes cluster, as long as the apiserver can reach it.

  • Prepare the cfssl certificate tooling

    cfssl is an open-source certificate management tool that generates certificates from JSON files and is easier to use than openssl.

    # Run on any server; the master node is used here.
    wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
    wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
    wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
    chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64
    mv cfssl_linux-amd64 /usr/local/bin/cfssl
    mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
    mv cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo
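    # Quick check that the tools are installed and on the PATH:
    cfssl version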
    
  • Generate etcd certificates

    • Self-sign a certificate authority (CA)

      # Create working directories
      mkdir -p ~/TLS/{etcd,k8s}
      cd ~/TLS/etcd
      
      # Self-signed CA configuration
      cat > ca-config.json << EOF
      {
        "signing": {
          "default": {
            "expiry": "87600h"
          },
          "profiles": {
            "www": {
               "expiry": "87600h",
               "usages": [
                  "signing",
                  "key encipherment",
                  "server auth",
                  "client auth"
              ]
            }
          }
        }
      }
      EOF
      
      cat > ca-csr.json << EOF
      {
          "CN": "etcd CA",
          "key": {
              "algo": "rsa",
              "size": 2048
          },
          "names": [
              {
                  "C": "CN",
                  "L": "Beijing",
                  "ST": "Beijing"
              }
          ]
      }
      EOF
      
      
      
      # Generate the CA certificate and key
      cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
      ls *pem
      ca-key.pem ca.pem
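      
      # Optionally inspect the new CA certificate (subject, expiry) with the
      # cfssl-certinfo tool installed earlier:
      cfssl-certinfo -cert ca.pem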
      
    • Issue the etcd HTTPS certificate with the self-signed CA

      The IPs in the hosts field must include every etcd member's cluster-internal IP, without exception; to ease future scale-out, add a few spare IPs as well.

      # Create the certificate signing request file
      cat > server-csr.json << EOF
      {
          "CN": "etcd",
          "hosts": [
              "172.26.22.200",
              "172.26.22.201"
          ],
          "key": {
              "algo": "rsa",
              "size": 2048
          },
          "names": [
              {
                  "C": "CN",
                  "L": "BeiJing",
                  "ST": "BeiJing"
              }
          ]
      }
      EOF
      
      # Issue the certificate
      cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server
      
      ls server*pem
      server-key.pem server.pem
      
  • Download the etcd binaries from GitHub

    # After downloading, upload the archive to any location on the server
    https://github.com/etcd-io/etcd/releases/download/v3.4.9/etcd-v3.4.9-linux-amd64.tar.gz
    
  • Deploy the etcd cluster

    Perform the following steps on every etcd server.

    # Create the working directory and unpack the binaries
    mkdir -p /opt/etcd/{bin,cfg,ssl}
    tar zxvf etcd-v3.4.9-linux-amd64.tar.gz
    mv etcd-v3.4.9-linux-amd64/{etcd,etcdctl} /opt/etcd/bin/
    
    # Create the etcd config file
    cat > /opt/etcd/cfg/etcd.conf << EOF
    #[Member]
    ETCD_NAME="etcd-1"
    ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
    ETCD_LISTEN_PEER_URLS="https://192.168.242.51:2380"
    ETCD_LISTEN_CLIENT_URLS="https://192.168.242.51:2379"
    
    #[Clustering]
    ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.242.51:2380"
    ETCD_ADVERTISE_CLIENT_URLS="https://192.168.242.51:2379"
    ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.242.51:2380,etcd-2=https://192.168.242.52:2380,etcd-3=https://192.168.242.53:2380"
    ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
    ETCD_INITIAL_CLUSTER_STATE="new"
    EOF
    

    Configuration notes

    • ETCD_NAME: node name, unique within the cluster
    • ETCD_DATA_DIR: data directory
    • ETCD_LISTEN_PEER_URLS: peer (cluster) listen address
    • ETCD_LISTEN_CLIENT_URLS: client listen address
    • ETCD_INITIAL_CLUSTER: addresses of all cluster members
    • ETCD_INITIAL_CLUSTER_TOKEN: cluster token
    • ETCD_INITIAL_CLUSTER_STATE: join state; "new" for a new cluster, "existing" to join an existing one
    # Manage etcd with systemd
    cat > /usr/lib/systemd/system/etcd.service << EOF
    [Unit]
    Description=Etcd Server
    After=network.target
    After=network-online.target
    Wants=network-online.target
    
    [Service]
    Type=notify
    EnvironmentFile=/opt/etcd/cfg/etcd.conf
    ExecStart=/opt/etcd/bin/etcd \
    --cert-file=/opt/etcd/ssl/server.pem \
    --key-file=/opt/etcd/ssl/server-key.pem \
    --peer-cert-file=/opt/etcd/ssl/server.pem \
    --peer-key-file=/opt/etcd/ssl/server-key.pem \
    --trusted-ca-file=/opt/etcd/ssl/ca.pem \
    --peer-trusted-ca-file=/opt/etcd/ssl/ca.pem \
    --logger=zap
    Restart=on-failure
    LimitNOFILE=65536
    
    [Install]
    WantedBy=multi-user.target
    EOF
    
    # Copy the certificates generated earlier
    cp ~/TLS/etcd/ca*pem ~/TLS/etcd/server*pem /opt/etcd/ssl/
    
    # Copy all files generated on node 1 above to node 2 and node 3
    for i in {2..3}
    do
    scp -r /opt/etcd/ root@192.168.242.5$i:/opt/
    scp /usr/lib/systemd/system/etcd.service root@192.168.242.5$i:/usr/lib/systemd/system/
    done
    
    # On node 2 and node 3, change the node name and server IPs in etcd.conf
    #[Member]
    ETCD_NAME="etcd-1"    # node 2: etcd-2; node 3: etcd-3
    ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
    ETCD_LISTEN_PEER_URLS="https://192.168.242.51:2380"  # change to this node's IP
    ETCD_LISTEN_CLIENT_URLS="https://192.168.242.51:2379"  # change to this node's IP
    
    #[Clustering]
    ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.242.51:2380" # change to this node's IP
    ETCD_ADVERTISE_CLIENT_URLS="https://192.168.242.51:2379" # change to this node's IP
    ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.242.51:2380,etcd-2=https://192.168.242.52:2380,etcd-3=https://192.168.242.53:2380"  
    ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
    ETCD_INITIAL_CLUSTER_STATE="new"
    
    
    # Start and enable on boot
    # etcd members must be started together: running systemctl start etcd on a single node blocks in the foreground while it waits for the other members, so start etcd on all nodes at roughly the same time, e.g. with a batch management tool or a script.
    systemctl daemon-reload
    systemctl start etcd
    systemctl enable etcd
    
    # Check etcd cluster health
    [root@k8s-master1 ~]# ETCDCTL_API=3 /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.242.51:2379,https://192.168.242.52:2379,https://192.168.242.53:2379" endpoint health --write-out=table
    +-----------------------------+--------+-------------+-------+
    |          ENDPOINT           | HEALTH |    TOOK     | ERROR |
    +-----------------------------+--------+-------------+-------+
    | https://192.168.242.52:2379 |   true | 67.267851ms |       |
    | https://192.168.242.51:2379 |   true | 67.374967ms |       |
    | https://192.168.242.53:2379 |   true | 69.244918ms |       |
    +-----------------------------+--------+-------------+-------+
    
    # etcd troubleshooting
    less /var/log/messages
    journalctl -u etcd
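    
    # Optionally list the cluster members (same TLS flags as the health check above):
    ETCDCTL_API=3 /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.242.51:2379" member list --write-out=table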
    
Install Docker

Kubernetes deprecated the Docker runtime (dockershim) in v1.20 and removed it entirely in v1.24; the version installed here predates the removal.

  • Unpack the binary package

    wget https://download.docker.com/linux/static/stable/x86_64/docker-19.03.9.tgz
    tar -xf docker-19.03.9.tgz
    mv docker/* /usr/bin/
    
  • Configure a registry mirror

    sudo mkdir -p /etc/docker
    sudo tee /etc/docker/daemon.json <<-'EOF'
    {
      "registry-mirrors": ["https://3s9106.mirror.alncs.com"]
    }
    EOF
    
  • Start Docker and enable it on boot

    systemctl daemon-reload
    systemctl start docker
    systemctl enable docker
    
Deploy the master components
  • Generate the kube-apiserver certificate

    • Self-sign a certificate authority (CA)

      cd ~/TLS/k8s
      
      cat > ca-config.json << EOF
      {
        "signing": {
          "default": {
            "expiry": "87600h"
          },
          "profiles": {
            "kubernetes": {
               "expiry": "87600h",
               "usages": [
                  "signing",
                  "key encipherment",
                  "server auth",
                  "client auth"
              ]
            }
          }
        }
      }
      EOF
      cat > ca-csr.json << EOF
      {
          "CN": "kubernetes",
          "key": {
              "algo": "rsa",
              "size": 2048
          },
          "names": [
              {
                  "C": "CN",
                  "L": "Beijing",
                  "ST": "Beijing",
                  "O": "k8s",
                  "OU": "System"
              }
          ]
      }
      EOF
      

      Generate the CA certificate:

      cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
      ls *pem
      ca-key.pem ca.pem
      
    • Issue the kube-apiserver HTTPS certificate with the self-signed CA

      The IPs in the hosts field must include every master/LB/VIP address, without exception; to ease future scale-out, add a few spare IPs as well.

      # Create the certificate signing request file
      cat > server-csr.json << EOF
      {
          "CN": "kubernetes",
          "hosts": [
            "10.0.0.1",
            "127.0.0.1",
            "192.168.242.51",
            "192.168.242.52",
            "192.168.242.53",
            "192.168.242.54",
            "192.168.242.55",
            "kubernetes",
            "kubernetes.default",
            "kubernetes.default.svc",
            "kubernetes.default.svc.cluster",
            "kubernetes.default.svc.cluster.local"
          ],
          "key": {
              "algo": "rsa",
              "size": 2048
          },
          "names": [
              {
                  "C": "CN",
                  "L": "BeiJing",
                  "ST": "BeiJing",
                  "O": "k8s",
                  "OU": "System"
              }
          ]
      }
      EOF
      
  • Download

    Download page: https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.23.md

  • Unpack the binary package

    Upload the Kubernetes server package you just downloaded to the server.

    mkdir -p /opt/kubernetes/{bin,cfg,ssl,logs} 
    tar zxvf kubernetes-server-linux-amd64.tar.gz
    cd kubernetes/server/bin
    cp kube-apiserver kube-scheduler kube-controller-manager /opt/kubernetes/bin
    cp kubectl /usr/bin/
    
  • Deploy kube-apiserver
    • Create the config file

      cat > /opt/kubernetes/cfg/kube-apiserver.conf << EOF
      KUBE_APISERVER_OPTS="--logtostderr=false \\
      --v=2 \\
      --log-dir=/opt/kubernetes/logs \\
      --etcd-servers=https://192.168.242.51:2379,https://192.168.242.52:2379,https://192.168.242.53:2379 \\
      --bind-address=192.168.242.51 \\
      --secure-port=6443 \\
      --advertise-address=192.168.242.51 \\
      --allow-privileged=true \\
      --service-cluster-ip-range=10.0.0.0/24 \\
      --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\
      --authorization-mode=RBAC,Node \\
      --enable-bootstrap-token-auth=true \\
      --token-auth-file=/opt/kubernetes/cfg/token.csv \\
      --service-node-port-range=30000-32767 \\
      --kubelet-client-certificate=/opt/kubernetes/ssl/server.pem \\
      --kubelet-client-key=/opt/kubernetes/ssl/server-key.pem \\
      --tls-cert-file=/opt/kubernetes/ssl/server.pem  \\
      --tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \\
      --client-ca-file=/opt/kubernetes/ssl/ca.pem \\
      --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \\
      --service-account-issuer=api \\
      --service-account-signing-key-file=/opt/kubernetes/ssl/server-key.pem \\
      --etcd-cafile=/opt/etcd/ssl/ca.pem \\
      --etcd-certfile=/opt/etcd/ssl/server.pem \\
      --etcd-keyfile=/opt/etcd/ssl/server-key.pem \\
      --requestheader-client-ca-file=/opt/kubernetes/ssl/ca.pem \\
      --proxy-client-cert-file=/opt/kubernetes/ssl/server.pem \\
      --proxy-client-key-file=/opt/kubernetes/ssl/server-key.pem \\
      --requestheader-allowed-names=kubernetes \\
      --requestheader-extra-headers-prefix=X-Remote-Extra- \\
      --requestheader-group-headers=X-Remote-Group \\
      --requestheader-username-headers=X-Remote-User \\
      --enable-aggregator-routing=true \\
      --audit-log-maxage=30 \\
      --audit-log-maxbackup=3 \\
      --audit-log-maxsize=100 \\
      --audit-log-path=/opt/kubernetes/logs/k8s-audit.log"
      EOF
      

      Note: in the double backslashes above, the first backslash escapes the second, so the here-doc writes a literal backslash plus newline into the file; an unescaped single backslash would be consumed by the here-doc as a line continuation.

      • --logtostderr: log to stderr (false here, so logs go to files)
      • --v: log verbosity
      • --log-dir: log directory
      • --etcd-servers: etcd cluster addresses
      • --bind-address: listen address
      • --secure-port: HTTPS port
      • --advertise-address: address advertised to the cluster
      • --allow-privileged: allow privileged containers
      • --service-cluster-ip-range: Service virtual IP range
      • --enable-admission-plugins: admission control plugins
      • --authorization-mode: authorization; enables RBAC and Node self-management
      • --enable-bootstrap-token-auth: enable the TLS bootstrap mechanism
      • --token-auth-file: bootstrap token file
      • --service-node-port-range: default NodePort allocation range for Services
      • --kubelet-client-xxx: client certificate the apiserver uses to access kubelets
      • --tls-xxx-file: apiserver HTTPS certificates
      • Required since v1.20: --service-account-issuer, --service-account-signing-key-file
      • --etcd-xxxfile: certificates for connecting to the etcd cluster
      • --audit-log-xxx: audit log settings
      • Aggregation layer (gateway) settings: --requestheader-client-ca-file, --proxy-client-cert-file, --proxy-client-key-file, --requestheader-allowed-names, --requestheader-extra-headers-prefix, --requestheader-group-headers, --requestheader-username-headers, --enable-aggregator-routing
    • Copy the certificates generated earlier

      cp ~/TLS/k8s/ca*pem ~/TLS/k8s/server*pem /opt/kubernetes/ssl/
      
    • Enable the TLS bootstrapping mechanism

      TLS bootstrapping: once the apiserver has TLS authentication enabled, the kubelet and kube-proxy on every node must present valid CA-signed certificates to talk to it. Issuing client certificates by hand becomes a lot of work when there are many nodes and complicates scaling the cluster. To simplify this, Kubernetes introduced TLS bootstrapping: the kubelet automatically requests its certificate from the apiserver as a low-privilege user, and the apiserver signs it dynamically. This approach is strongly recommended on nodes; currently it is used mainly for the kubelet, while kube-proxy still gets a single certificate issued by us.

      > TLS bootstrapping workflow
      
      <div align=center><img src="https://img-blog.csdnimg.cn/7f9ce4b198154a11bbb6319d285af487.png" width="600"></div>
      
      > Create the token file referenced in the config above:
      
      ```
      cat > /opt/kubernetes/cfg/token.csv << EOF
      4136692876ad4b01bb9dd0988480ebba,kubelet-bootstrap,10001,"system:node-bootstrapper"
      EOF
      
      # Format: token,username,UID,user group
      # You can also generate your own token and substitute it
      head -c 16 /dev/urandom | od -An -t x | tr -d ' '
      ```
      
      • systemd unit for kube-apiserver

        cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
        [Unit]
        Description=Kubernetes API Server
        Documentation=https://github.com/kubernetes/kubernetes
        
        [Service]
        EnvironmentFile=/opt/kubernetes/cfg/kube-apiserver.conf
        ExecStart=/opt/kubernetes/bin/kube-apiserver \$KUBE_APISERVER_OPTS
        Restart=on-failure
        
        [Install]
        WantedBy=multi-user.target
        EOF
        
      • Start and enable on boot

        systemctl daemon-reload
        systemctl start kube-apiserver 
        systemctl enable kube-apiserver
        
  • Deploy kube-controller-manager
    • Create the config file

      cat > /opt/kubernetes/cfg/kube-controller-manager.conf << EOF
      KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \\
      --v=2 \\
      --log-dir=/opt/kubernetes/logs \\
      --leader-elect=true \\
      --kubeconfig=/opt/kubernetes/cfg/kube-controller-manager.kubeconfig \\
      --bind-address=127.0.0.1 \\
      --allocate-node-cidrs=true \\
      --cluster-cidr=10.244.0.0/16 \\
      --service-cluster-ip-range=10.0.0.0/24 \\
      --cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \\
      --cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem  \\
      --root-ca-file=/opt/kubernetes/ssl/ca.pem \\
      --service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \\
      --cluster-signing-duration=87600h0m0s"
      EOF
      
      • --kubeconfig: kubeconfig for connecting to the apiserver
      • --leader-elect: automatic leader election when multiple instances run (HA)
      • --cluster-signing-cert-file: CA used to auto-issue kubelet certificates; must match the apiserver's
      • --cluster-signing-key-file: CA key used to auto-issue kubelet certificates; must match the apiserver's
    • Generate the kubeconfig file

      Generate the kube-controller-manager certificate:

      # Switch to the working directory
      cd ~/TLS/k8s
      
      # Create the certificate signing request file
      cat > kube-controller-manager-csr.json << EOF
      {
        "CN": "system:kube-controller-manager",
        "hosts": [],
        "key": {
          "algo": "rsa",
          "size": 2048
        },
        "names": [
          {
            "C": "CN",
            "L": "BeiJing", 
            "ST": "BeiJing",
            "O": "system:masters",
            "OU": "System"
          }
        ]
      }
      EOF
      
      # Issue the certificate
      cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
      

      Generate the kubeconfig file (these are shell commands; run them directly in a terminal):

      KUBE_CONFIG="/opt/kubernetes/cfg/kube-controller-manager.kubeconfig"
      KUBE_APISERVER="https://192.168.242.51:6443"
      
      kubectl config set-cluster kubernetes \
        --certificate-authority=/opt/kubernetes/ssl/ca.pem \
        --embed-certs=true \
        --server=${KUBE_APISERVER} \
        --kubeconfig=${KUBE_CONFIG}
        
      kubectl config set-credentials kube-controller-manager \
        --client-certificate=./kube-controller-manager.pem \
        --client-key=./kube-controller-manager-key.pem \
        --embed-certs=true \
        --kubeconfig=${KUBE_CONFIG}
        
      kubectl config set-context default \
        --cluster=kubernetes \
        --user=kube-controller-manager \
        --kubeconfig=${KUBE_CONFIG}
        
      kubectl config use-context default --kubeconfig=${KUBE_CONFIG}
      
    • systemd unit for kube-controller-manager

      cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF
      [Unit]
      Description=Kubernetes Controller Manager
      Documentation=https://github.com/kubernetes/kubernetes
      
      [Service]
      EnvironmentFile=/opt/kubernetes/cfg/kube-controller-manager.conf
      ExecStart=/opt/kubernetes/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
      Restart=on-failure
      
      [Install]
      WantedBy=multi-user.target
      EOF
      
    • Start and enable on boot

      systemctl daemon-reload
      systemctl start kube-controller-manager
      systemctl enable kube-controller-manager
      
  • Deploy kube-scheduler
    • Create the config file

      cat > /opt/kubernetes/cfg/kube-scheduler.conf << EOF
      KUBE_SCHEDULER_OPTS="--logtostderr=false \\
      --v=2 \\
      --log-dir=/opt/kubernetes/logs \\
      --leader-elect \\
      --kubeconfig=/opt/kubernetes/cfg/kube-scheduler.kubeconfig \\
      --bind-address=127.0.0.1"
      EOF
      
      • --kubeconfig: kubeconfig for connecting to the apiserver
      • --leader-elect: automatic leader election when multiple instances run (HA)
    • Generate the kubeconfig file

      Generate the kube-scheduler certificate:

      # Switch to the working directory
      cd ~/TLS/k8s
      
      # Create the certificate signing request file
      cat > kube-scheduler-csr.json << EOF
      {
        "CN": "system:kube-scheduler",
        "hosts": [],
        "key": {
          "algo": "rsa",
          "size": 2048
        },
        "names": [
          {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing",
            "O": "system:masters",
            "OU": "System"
          }
        ]
      }
      EOF
      
      # Issue the certificate
      cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
      

      Generate the kubeconfig file:

      KUBE_CONFIG="/opt/kubernetes/cfg/kube-scheduler.kubeconfig"
      KUBE_APISERVER="https://192.168.242.51:6443"
      
      kubectl config set-cluster kubernetes \
        --certificate-authority=/opt/kubernetes/ssl/ca.pem \
        --embed-certs=true \
        --server=${KUBE_APISERVER} \
        --kubeconfig=${KUBE_CONFIG}
        
      kubectl config set-credentials kube-scheduler \
        --client-certificate=./kube-scheduler.pem \
        --client-key=./kube-scheduler-key.pem \
        --embed-certs=true \
        --kubeconfig=${KUBE_CONFIG}
        
      kubectl config set-context default \
        --cluster=kubernetes \
        --user=kube-scheduler \
        --kubeconfig=${KUBE_CONFIG}
        
      kubectl config use-context default --kubeconfig=${KUBE_CONFIG}
      
    • systemd unit for kube-scheduler

      cat > /usr/lib/systemd/system/kube-scheduler.service << EOF
      [Unit]
      Description=Kubernetes Scheduler
      Documentation=https://github.com/kubernetes/kubernetes
      
      [Service]
      EnvironmentFile=/opt/kubernetes/cfg/kube-scheduler.conf
      ExecStart=/opt/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
      Restart=on-failure
      
      [Install]
      WantedBy=multi-user.target
      EOF
      
    • Start and enable on boot

      systemctl daemon-reload
      systemctl start kube-scheduler
      systemctl enable kube-scheduler
      
    • Check cluster status

      Generate the certificate kubectl uses to connect to the cluster:

      cat > admin-csr.json <<EOF
      {
        "CN": "admin",
        "hosts": [],
        "key": {
          "algo": "rsa",
          "size": 2048
        },
        "names": [
          {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing",
            "O": "system:masters",
            "OU": "System"
          }
        ]
      }
      EOF
      
      cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
      

      Generate the kubeconfig file:

      mkdir /root/.kube
      
      KUBE_CONFIG="/root/.kube/config"
      KUBE_APISERVER="https://192.168.242.51:6443"
      
      kubectl config set-cluster kubernetes \
        --certificate-authority=/opt/kubernetes/ssl/ca.pem \
        --embed-certs=true \
        --server=${KUBE_APISERVER} \
        --kubeconfig=${KUBE_CONFIG}
        
      kubectl config set-credentials cluster-admin \
        --client-certificate=./admin.pem \
        --client-key=./admin-key.pem \
        --embed-certs=true \
        --kubeconfig=${KUBE_CONFIG}
        
      kubectl config set-context default \
        --cluster=kubernetes \
        --user=cluster-admin \
        --kubeconfig=${KUBE_CONFIG}
        
      kubectl config use-context default --kubeconfig=${KUBE_CONFIG}
      

      Check the status of the cluster components with kubectl:

      [root@k8s-master1 k8s]# kubectl get cs
      Warning: v1 ComponentStatus is deprecated in v1.19+
      NAME                 STATUS    MESSAGE             ERROR
      scheduler            Healthy   ok                  
      controller-manager   Healthy   ok                  
      etcd-2               Healthy   {"health":"true"}   
      etcd-0               Healthy   {"health":"true"}   
      etcd-1               Healthy   {"health":"true"}
      
      
      ----
      The output above shows that all master components are running normally.
      
    • Authorize the kubelet-bootstrap user to request certificates

      kubectl create clusterrolebinding kubelet-bootstrap \
      --clusterrole=system:node-bootstrapper \
      --user=kubelet-bootstrap
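      
      # Optional sanity check: impersonate the bootstrap user and confirm it is
      # allowed to create certificate signing requests:
      kubectl auth can-i create certificatesigningrequests --as=kubelet-bootstrap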
      
Deploy the worker components (on the master node)

The following is still done on the master node, which acts both as a master and as a worker node.

  • Create the working directory and copy the binaries

    Note: create the working directory on every worker node

    mkdir -p /opt/kubernetes/{bin,cfg,ssl,logs} 
    

    Copy from the master's kubernetes-server package to all worker nodes:

    # Enter the kubernetes-server package directory
    cd /k8s-software/kubernetes/server/bin/
    
    for i in {1..3}
    do
    scp kubelet  kube-proxy root@192.168.242.5$i:/opt/kubernetes/bin/
    done
    
  • Deploy the kubelet
    • Create the config file

      cat > /opt/kubernetes/cfg/kubelet.conf << EOF
      KUBELET_OPTS="--logtostderr=false \\
      --v=2 \\
      --log-dir=/opt/kubernetes/logs \\
      --hostname-override=k8s-master1 \\
      --network-plugin=cni \\
      --kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\
      --bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\
      --config=/opt/kubernetes/cfg/kubelet-config.yml \\
      --cert-dir=/opt/kubernetes/ssl \\
      --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
      EOF
      
      • --hostname-override: display name, unique within the cluster (no duplicates)
      • --network-plugin: enable CNI
      • --kubeconfig: empty path; generated automatically and later used to connect to the apiserver
      • --bootstrap-kubeconfig: used on first start to request a certificate from the apiserver
      • --config: configuration file
      • --cert-dir: kubelet certificate directory
      • --pod-infra-container-image: image of the infra (pause) container that manages the Pod network
    • Kubelet configuration file

      cat > /opt/kubernetes/cfg/kubelet-config.yml << EOF
      kind: KubeletConfiguration
      apiVersion: kubelet.config.k8s.io/v1beta1
      address: 0.0.0.0
      port: 10250
      readOnlyPort: 10255
      cgroupDriver: cgroupfs
      clusterDNS:
      - 10.0.0.2
      clusterDomain: cluster.local 
      failSwapOn: false
      authentication:
        anonymous:
          enabled: false
        webhook:
          cacheTTL: 2m0s
          enabled: true
        x509:
          clientCAFile: /opt/kubernetes/ssl/ca.pem 
      authorization:
        mode: Webhook
        webhook:
          cacheAuthorizedTTL: 5m0s
          cacheUnauthorizedTTL: 30s
      evictionHard:
        imagefs.available: 15%
        memory.available: 100Mi
        nodefs.available: 10%
        nodefs.inodesFree: 5%
      maxOpenFiles: 1000000
      maxPods: 110
      EOF
      
    • Generate the bootstrap kubeconfig for the kubelet's first join

      KUBE_CONFIG="/opt/kubernetes/cfg/bootstrap.kubeconfig"
      KUBE_APISERVER="https://192.168.242.51:6443" # apiserver IP:PORT
      TOKEN="4136692876ad4b01bb9dd0988480ebba" # must match token.csv (/opt/kubernetes/cfg/token.csv)
      
      # Generate the kubelet bootstrap kubeconfig
      kubectl config set-cluster kubernetes \
        --certificate-authority=/opt/kubernetes/ssl/ca.pem \
        --embed-certs=true \
        --server=${KUBE_APISERVER} \
        --kubeconfig=${KUBE_CONFIG}
        
      kubectl config set-credentials "kubelet-bootstrap" \
        --token=${TOKEN} \
        --kubeconfig=${KUBE_CONFIG}
        
      kubectl config set-context default \
        --cluster=kubernetes \
        --user="kubelet-bootstrap" \
        --kubeconfig=${KUBE_CONFIG}
        
      kubectl config use-context default --kubeconfig=${KUBE_CONFIG}
      
    • systemd unit for the kubelet

      cat > /usr/lib/systemd/system/kubelet.service << EOF
      [Unit]
      Description=Kubernetes Kubelet
      After=docker.service
      
      [Service]
      EnvironmentFile=/opt/kubernetes/cfg/kubelet.conf
      ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS
      Restart=on-failure
      LimitNOFILE=65536
      
      [Install]
      WantedBy=multi-user.target
      EOF
      
    • Start and enable on boot

      systemctl daemon-reload
      systemctl start kubelet
      systemctl enable kubelet
      
    • Approve the kubelet certificate request and join the node to the cluster

      # List kubelet certificate requests
      [root@k8s-master1 bin]# kubectl get csr
      NAME                                                   AGE    SIGNERNAME                                    REQUESTOR           CONDITION
      node-csr-KbHieprZUMOvTFMHGQ1RNTZEhsSlT5X6wsh2lzfUry4   107s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Pending
      
      # Approve the node's certificate request
      [root@k8s-master1 bin]# kubectl certificate approve  node-csr-KbHieprZUMOvTFMHGQ1RNTZEhsSlT5X6wsh2lzfUry4
      certificatesigningrequest.certificates.k8s.io/node-csr-KbHieprZUMOvTFMHGQ1RNTZEhsSlT5X6wsh2lzfUry4 approved
      
      # Check the request again
      [root@k8s-master1 bin]# kubectl get csr
      NAME                                                   AGE     SIGNERNAME                                    REQUESTOR           CONDITION
      node-csr-KbHieprZUMOvTFMHGQ1RNTZEhsSlT5X6wsh2lzfUry4   2m35s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Approved,Issued
      
      # List nodes
      [root@k8s-master1 bin]# kubectl get nodes
      NAME          STATUS     ROLES    AGE     VERSION
      k8s-master1   NotReady   <none>   2m11s   v1.20.10
      

      The node shows NotReady because the network plugin has not been deployed yet.

  • Deploy kube-proxy
    • Create the config file

      cat > /opt/kubernetes/cfg/kube-proxy.conf << EOF
      KUBE_PROXY_OPTS="--logtostderr=false \\
      --v=2 \\
      --log-dir=/opt/kubernetes/logs \\
      --config=/opt/kubernetes/cfg/kube-proxy-config.yml"
      EOF
      
    • Parameter configuration file

      cat > /opt/kubernetes/cfg/kube-proxy-config.yml << EOF
      kind: KubeProxyConfiguration
      apiVersion: kubeproxy.config.k8s.io/v1alpha1
      bindAddress: 0.0.0.0
      metricsBindAddress: 0.0.0.0:10249
      clientConnection:
        kubeconfig: /opt/kubernetes/cfg/kube-proxy.kubeconfig
      hostnameOverride: k8s-master1
      clusterCIDR: 10.244.0.0/16
      EOF
      
    • Generate the kube-proxy certificate

      # Switch to the working directory
      cd ~/TLS/k8s
      
      # Create the certificate signing request file
      cat > kube-proxy-csr.json << EOF
      {
        "CN": "system:kube-proxy",
        "hosts": [],
        "key": {
          "algo": "rsa",
          "size": 2048
        },
        "names": [
          {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing",
            "O": "k8s",
            "OU": "System"
          }
        ]
      }
      EOF
      
      # Issue the certificate
      cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
      
    • Generate the kube-proxy.kubeconfig file

      KUBE_CONFIG="/opt/kubernetes/cfg/kube-proxy.kubeconfig"
      KUBE_APISERVER="https://192.168.242.51:6443"
      
      kubectl config set-cluster kubernetes \
        --certificate-authority=/opt/kubernetes/ssl/ca.pem \
        --embed-certs=true \
        --server=${KUBE_APISERVER} \
        --kubeconfig=${KUBE_CONFIG}
        
      kubectl config set-credentials kube-proxy \
        --client-certificate=./kube-proxy.pem \
        --client-key=./kube-proxy-key.pem \
        --embed-certs=true \
        --kubeconfig=${KUBE_CONFIG}
        
      kubectl config set-context default \
        --cluster=kubernetes \
        --user=kube-proxy \
        --kubeconfig=${KUBE_CONFIG}
        
      kubectl config use-context default --kubeconfig=${KUBE_CONFIG}
      
    • systemd unit for kube-proxy

      cat > /usr/lib/systemd/system/kube-proxy.service << EOF
      [Unit]
      Description=Kubernetes Proxy
      After=network.target
      
      [Service]
      EnvironmentFile=/opt/kubernetes/cfg/kube-proxy.conf
      ExecStart=/opt/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS
      Restart=on-failure
      LimitNOFILE=65536
      
      [Install]
      WantedBy=multi-user.target
      EOF
      
    • Start and enable on boot

      systemctl daemon-reload
      systemctl start kube-proxy
      systemctl enable kube-proxy
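      
      # Optional check: kube-proxy reports its proxy mode on the metrics port set
      # above (10249); expect "iptables" here unless ipvs was configured:
      curl localhost:10249/proxyMode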
      
  • Deploy the network component (Calico)

    Calico is a pure layer-3 data center networking solution and is currently the mainstream network choice for Kubernetes.

    kubectl apply -f calico.yaml
    kubectl get pods -n kube-system
    
    # Once the Calico Pods are all Running, the node becomes Ready.
    [root@k8s-master1 yaml]# kubectl get pods -n kube-system
    NAME                                      READY   STATUS    RESTARTS   AGE
    calico-kube-controllers-97769f7c7-zcz5d   1/1     Running   0          3m11s
    calico-node-5tnll                         1/1     Running   0          3m11s
    
    [root@k8s-master1 yaml]# kubectl get nodes
    NAME          STATUS   ROLES    AGE   VERSION
    k8s-master1   Ready    <none>   21m   v1.20.10
    
  • Authorize the apiserver to access kubelets

    Needed for operations such as kubectl logs.

    cat > apiserver-to-kubelet-rbac.yaml << EOF
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRole
    metadata:
      annotations:
        rbac.authorization.kubernetes.io/autoupdate: "true"
      labels:
        kubernetes.io/bootstrapping: rbac-defaults
      name: system:kube-apiserver-to-kubelet
    rules:
      - apiGroups:
          - ""
        resources:
          - nodes/proxy
          - nodes/stats
          - nodes/log
          - nodes/spec
          - nodes/metrics
          - pods/log
        verbs:
          - "*"
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      name: system:kube-apiserver
      namespace: ""
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: system:kube-apiserver-to-kubelet
    subjects:
      - apiGroup: rbac.authorization.k8s.io
        kind: User
        name: kubernetes
    EOF
    
    kubectl apply -f apiserver-to-kubelet-rbac.yaml
    
Add more worker nodes

All new worker nodes can follow these steps.

  • Copy the already-deployed files to the new nodes

    On the master, copy the files a worker node needs to the new nodes (242.52/242.53)

    for i in {2..3}; do scp -r /opt/kubernetes root@192.168.242.5$i:/opt/; done
    
    for i in {2..3}; do scp -r /usr/lib/systemd/system/{kubelet,kube-proxy}.service root@192.168.242.5$i:/usr/lib/systemd/system; done
    
    for i in {2..3}; do scp -r /opt/kubernetes/ssl/ca.pem root@192.168.242.5$i:/opt/kubernetes/ssl/; done
    
  • Delete the kubelet certificate and kubeconfig files
    rm -f /opt/kubernetes/cfg/kubelet.kubeconfig 
    rm -f /opt/kubernetes/ssl/kubelet*
    
    These files were generated automatically when the certificate request was approved; they differ per node and must be deleted.
    
  • Change the hostname overrides
    vi /opt/kubernetes/cfg/kubelet.conf
    --hostname-override=k8s-node1
    
    vi /opt/kubernetes/cfg/kube-proxy-config.yml
    hostnameOverride: k8s-node1
    
  • Start and enable on boot
    systemctl daemon-reload
    systemctl start kubelet kube-proxy
    systemctl enable kubelet kube-proxy
    
  • Approve the new node's kubelet certificate request on the master
    # List certificate requests
    [root@k8s-master1 kubernetes]# kubectl get csr
    NAME                                                   AGE   SIGNERNAME                                    REQUESTOR           CONDITION
    node-csr-2vKShQc_wlqPrTPAwT5MHpdRWIX-oyr9NyBXu1XNwxg   12s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Pending
    node-csr-KbHieprZUMOvTFMHGQ1RNTZEhsSlT5X6wsh2lzfUry4   47h   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Approved,Issued
    # Approve
    [root@k8s-master1 kubernetes]# kubectl certificate approve node-csr-2vKShQc_wlqPrTPAwT5MHpdRWIX-oyr9NyBXu1XNwxg
    certificatesigningrequest.certificates.k8s.io/node-csr-2vKShQc_wlqPrTPAwT5MHpdRWIX-oyr9NyBXu1XNwxg approved
    
  • Check node status (it takes a little while to become Ready; some bootstrap images are pulled first)

    [root@k8s-master1 kubernetes]# kubectl get nodes
    NAME          STATUS   ROLES    AGE   VERSION
    k8s-master1   Ready    <none>   46h   v1.20.10
    k8s-node1     Ready    <none>   77s   v1.20.10
    
Deploy the Dashboard and CoreDNS
  • Deploy the Dashboard

    Access URL: https://NodeIP:30001
    Create a service account and bind it to the default cluster-admin cluster role.

    kubectl apply -f kubernetes-dashboard.yaml
    
    # Check the deployment
    [root@k8s-master1 yaml]#  kubectl get pods,svc -n kubernetes-dashboard
    NAME                                             READY   STATUS    RESTARTS   AGE
    pod/dashboard-metrics-scraper-7b59f7d4df-k49t9   1/1     Running   0          10m
    pod/kubernetes-dashboard-74d688b6bc-l9jz4        1/1     Running   0          10m
    
    NAME                                TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)         AGE
    service/dashboard-metrics-scraper   ClusterIP   10.0.0.206   <none>        8000/TCP        10m
    service/kubernetes-dashboard        NodePort    10.0.0.10    <none>        443:30001/TCP   10m
    
    kubectl create serviceaccount dashboard-admin -n kube-system
    kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
    kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}')
    

    Log in to the Dashboard with the token that is printed (if the browser rejects the HTTPS connection, try Firefox).

  • Deploy CoreDNS

    CoreDNS resolves Service names inside the cluster.

    [root@k8s-master1 yaml]# kubectl apply -f coredns.yaml 
    
    [root@k8s-master1 yaml]# kubectl get pods -n kube-system
    NAME                                      READY   STATUS    RESTARTS   AGE
    calico-kube-controllers-97769f7c7-zcz5d   1/1     Running   1          47h
    calico-node-5tnll                         1/1     Running   1          47h
    calico-node-m8sdg                         1/1     Running   0          42m
    calico-node-pqvk9                         1/1     Running   0          56m
    coredns-6cc56c94bd-5hvfb                  1/1     Running   0          37s
    

    Test that resolution works:

    [root@k8s-master1 yaml]# kubectl run -it --rm dns-test --image=busybox:1.28.4 sh 
    If you don't see a command prompt, try pressing enter.
    / # ns
    nsenter   nslookup
    / # nslookup kubernetes
    Server:    10.0.0.2
    Address 1: 10.0.0.2 kube-dns.kube-system.svc.cluster.local
    

    At this point the single-master Kubernetes cluster is complete.

Using kubectl from a worker node
# 1. Copy /etc/kubernetes/admin.conf from the master to the /etc/kubernetes directory on the target server
scp /etc/kubernetes/admin.conf root@k8s-node1:/etc/kubernetes

# 2. Configure the environment variable on that server
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile
source ~/.bash_profile
Tear down the Kubernetes environment
#!/bin/bash
kubeadm reset -f
modprobe -r ipip
lsmod
rm -rf ~/.kube/
rm -rf /etc/kubernetes/
rm -rf /etc/systemd/system/kubelet.service.d
rm -rf /etc/systemd/system/kubelet.service
rm -rf /usr/bin/kube*
rm -rf /etc/cni
rm -rf /opt/cni
rm -rf /var/lib/etcd
rm -rf /var/etcd
yum -y remove kubeadm* kubectl* kubelet* docker*
reboot