First, note that all three machines used for this setup were installed from the CentOS-7-x86_64-DVD-2009 image. The three hosts are laid out as follows (hostnames and IP addresses match the /etc/hosts entries used later):

master1   192.168.3.100
node1     192.168.3.95
node2     192.168.3.90

Once again: before starting, make sure the three machines can ping each other and can reach the Internet (a quick check is sketched below).
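A minimal connectivity check, run from master1 with the IP addresses above:

# check connectivity to the two worker nodes
for ip in 192.168.3.95 192.168.3.90; do ping -c 2 "$ip"; done
# check outbound Internet access and DNS resolution
ping -c 2 mirrors.aliyun.com

With connectivity confirmed, let's start the installation.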

Perform the following configuration on all three machines (master1, node1, node2):

#####################################################

#Change the hostname to master1, node1 or node2 on the corresponding machine
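# For example (hostnamectl is the standard tool on CentOS 7; run the matching
# command on each machine):
hostnamectl set-hostname master1   # use node1 / node2 on the other two hosts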

#Add hosts entries for name resolution

cat >/etc/hosts<<EOF

127.0.0.1 localhost localhost.localdomain

192.168.3.100 master1

192.168.3.95 node1

192.168.3.90 node2

EOF

#Disable SELinux (permanently in the config file, immediately with setenforce) and stop the firewall

sed -i '/SELINUX/s/enforcing/disabled/g'  /etc/sysconfig/selinux

setenforce  0

systemctl   stop     firewalld.service

systemctl   disable   firewalld.service

#Synchronize the time on every node

yum install ntpdate -y

ntpdate  pool.ntp.org
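# Optional: keep the clocks in sync with a periodic ntpdate run via cron
# (the 30-minute interval is an arbitrary choice, not part of the original steps)
(crontab -l 2>/dev/null; echo '*/30 * * * * /usr/sbin/ntpdate pool.ntp.org >/dev/null 2>&1') | crontab -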

#Disable swap. This is important: if swap is left on, kubelet will complain during the later steps and you will have to dig through the logs to find out why

swapoff -a
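# swapoff -a only lasts until the next reboot; to keep swap disabled permanently,
# comment out the swap entry in /etc/fstab (a sketch -- back up the file first)
cp /etc/fstab /etc/fstab.bak
sed -ri 's/.*swap.*/#&/' /etc/fstab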

##############################################################

Kernel module and parameter tuning

cat > /etc/modules-load.d/ipvs.conf <<EOF

# Load IPVS at boot

ip_vs

ip_vs_rr

ip_vs_wrr

ip_vs_sh

nf_conntrack_ipv4

EOF

systemctl enable --now systemd-modules-load.service

#Confirm the kernel modules were loaded successfully

lsmod | grep -e ip_vs -e nf_conntrack_ipv4

#Install ipset and ipvsadm

yum install -y ipset ipvsadm

#Configure kernel parameters

cat <<EOF >  /etc/sysctl.d/k8s.conf

net.bridge.bridge-nf-call-ip6tables = 1

net.bridge.bridge-nf-call-iptables = 1

EOF

sysctl --system
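# If sysctl reports that the net.bridge.* keys do not exist, the br_netfilter
# module is not loaded yet; load it now and on every boot (an optional extra
# step, not part of the original write-up)
modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
lsmod | grep br_netfilter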

Deploy Docker

# Install required dependency packages

yum install -y yum-utils device-mapper-persistent-data lvm2

# Add the Docker repository; here we use the Alibaba Cloud (Aliyun) yum mirror inside China

yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

# Install docker-ce; here we simply install the latest version

yum install -y docker-ce

#Write the Docker daemon configuration file

mkdir /etc/docker

cat > /etc/docker/daemon.json <<EOF

{

  "exec-opts": ["native.cgroupdriver=systemd"],

  "log-driver": "json-file",

  "log-opts": {

    "max-size": "100m"

  },

  "storage-driver": "overlay2",

  "storage-opts": [

    "overlay2.override_kernel_check=true"

  ],

  "registry-mirrors": ["https://uyah70su.mirror.aliyuncs.com"]

}

EOF

# Note: because pulling images is slow from inside China, a registry-mirrors entry was added at the end of the configuration

mkdir -p /etc/systemd/system/docker.service.d

# Reload systemd, then enable and start the Docker service

systemctl daemon-reload

systemctl enable docker.service

systemctl start docker.service

ps -ef|grep -aiE docker
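# Double-check that daemon.json took effect: the cgroup driver should be "systemd"
docker info 2>/dev/null | grep -i 'cgroup driver'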

##############################################################

Add the Kubernetes package repository

cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
EOF
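# Optional sanity check: refresh the yum cache and confirm the repo offers the
# kubeadm version we are about to pin
yum makecache fast
yum --showduplicates list kubeadm | grep 1.20.4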

##############################################################

Install the kubeadm tooling

#Install kubeadm, kubelet and kubectl, pinned to 1.20.4

yum install -y kubeadm-1.20.4 kubelet-1.20.4 kubectl-1.20.4

#Enable and start the kubelet service

systemctl enable kubelet.service

systemctl start kubelet.service
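#Note: until kubeadm init/join runs, kubelet will keep restarting because it has
#no cluster configuration yet -- that is expected. You can watch its state with:
systemctl status kubelet.service
journalctl -u kubelet -f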

###########################################################

Next, run the following initialization command on master1:

kubeadm init   --control-plane-endpoint=192.168.3.100:6443 --image-repository registry.aliyuncs.com/google_containers   --kubernetes-version v1.20.4   --service-cidr=10.10.0.0/16   --pod-network-cidr=10.244.0.0/16   --upload-certs

//Note: if the init fails, run kubeadm reset and remove the stale kubeconfig (rm -rf $HOME/.kube) before retrying.

Then run the commands that kubeadm init prints at the end of its output:

mkdir -p $HOME/.kube

sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

sudo chown $(id -u):$(id -g) $HOME/.kube/config

###########################################

Then, on node1 and node2, run the kubeadm join command printed by the init output on master1 (it contains the bootstrap token and the CA certificate hash), and wait for the success message.

Back on master1, check that both nodes have registered:

kubectl get nodes
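If the join command has scrolled out of the terminal, or its token has expired (kubeadm tokens are valid for 24 hours by default), a fresh one can be generated on master1:

kubeadm token create --print-join-command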

###################################################

Next we need pod networking across the nodes: until a network plugin is installed, the nodes stay NotReady.

Deploy the Calico network plugin:

kubectl apply -f https://docs.projectcalico.org/v3.10/manifests/calico.yaml
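Calico needs a few minutes to pull its images and start; once its pods are Running, the nodes switch from NotReady to Ready:

kubectl get pods -n kube-system | grep calico
kubectl get nodes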

###############################################

Deploy the dashboard UI

Create the manifest file (vim recommended.yaml) and paste in the following content:

# Copyright 2017 The Kubernetes Authors.

#

# Licensed under the Apache License, Version 2.0 (the "License");

# you may not use this file except in compliance with the License.

# You may obtain a copy of the License at

#

#     http://www.apache.org/licenses/LICENSE-2.0

#

# Unless required by applicable law or agreed to in writing, software

# distributed under the License is distributed on an "AS IS" BASIS,

# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

# See the License for the specific language governing permissions and

# limitations under the License.

apiVersion: v1

kind: Namespace

metadata:

  name: kubernetes-dashboard

---

apiVersion: v1

kind: ServiceAccount

metadata:

  labels:

    k8s-app: kubernetes-dashboard

  name: kubernetes-dashboard

  namespace: kubernetes-dashboard

---

kind: Service

apiVersion: v1

metadata:

  labels:

    k8s-app: kubernetes-dashboard

  name: kubernetes-dashboard

  namespace: kubernetes-dashboard

spec:

  type: NodePort

  ports:

    - port: 443

      targetPort: 8443

      nodePort: 31001

  selector:

    k8s-app: kubernetes-dashboard

---

apiVersion: v1

kind: Secret

metadata:

  labels:

    k8s-app: kubernetes-dashboard

  name: kubernetes-dashboard-certs

  namespace: kubernetes-dashboard

type: Opaque

---

apiVersion: v1

kind: Secret

metadata:

  labels:

    k8s-app: kubernetes-dashboard

  name: kubernetes-dashboard-csrf

  namespace: kubernetes-dashboard

type: Opaque

data:

  csrf: ""

---

apiVersion: v1

kind: Secret

metadata:

  labels:

    k8s-app: kubernetes-dashboard

  name: kubernetes-dashboard-key-holder

  namespace: kubernetes-dashboard

type: Opaque

---

kind: ConfigMap

apiVersion: v1

metadata:

  labels:

    k8s-app: kubernetes-dashboard

  name: kubernetes-dashboard-settings

  namespace: kubernetes-dashboard

---

kind: Role

apiVersion: rbac.authorization.k8s.io/v1

metadata:

  labels:

    k8s-app: kubernetes-dashboard

  name: kubernetes-dashboard

  namespace: kubernetes-dashboard

rules:

  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.

  - apiGroups: [""]

    resources: ["secrets"]

    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]

    verbs: ["get", "update", "delete"]

    # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.

  - apiGroups: [""]

    resources: ["configmaps"]

    resourceNames: ["kubernetes-dashboard-settings"]

    verbs: ["get", "update"]

    # Allow Dashboard to get metrics.

  - apiGroups: [""]

    resources: ["services"]

    resourceNames: ["heapster", "dashboard-metrics-scraper"]

    verbs: ["proxy"]

  - apiGroups: [""]

    resources: ["services/proxy"]

    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]

    verbs: ["get"]

---

kind: ClusterRole

apiVersion: rbac.authorization.k8s.io/v1

metadata:

  labels:

    k8s-app: kubernetes-dashboard

  name: kubernetes-dashboard

rules:

  # Allow Metrics Scraper to get metrics from the Metrics server

  - apiGroups: ["metrics.k8s.io"]

    resources: ["pods", "nodes"]

    verbs: ["get", "list", "watch"]

---

apiVersion: rbac.authorization.k8s.io/v1

kind: RoleBinding

metadata:

  labels:

    k8s-app: kubernetes-dashboard

  name: kubernetes-dashboard

  namespace: kubernetes-dashboard

roleRef:

  apiGroup: rbac.authorization.k8s.io

  kind: Role

  name: kubernetes-dashboard

subjects:

  - kind: ServiceAccount

    name: kubernetes-dashboard

    namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1

kind: ClusterRoleBinding

metadata:

  name: kubernetes-dashboard

roleRef:

  apiGroup: rbac.authorization.k8s.io

  kind: ClusterRole

  name: kubernetes-dashboard

subjects:

  - kind: ServiceAccount

    name: kubernetes-dashboard

    namespace: kubernetes-dashboard

---

kind: Deployment

apiVersion: apps/v1

metadata:

  labels:

    k8s-app: kubernetes-dashboard

  name: kubernetes-dashboard

  namespace: kubernetes-dashboard

spec:

  replicas: 1

  revisionHistoryLimit: 10

  selector:

    matchLabels:

      k8s-app: kubernetes-dashboard

  template:

    metadata:

      labels:

        k8s-app: kubernetes-dashboard

    spec:

      containers:

        - name: kubernetes-dashboard

          image: kubernetesui/dashboard:v2.0.0-rc5

          imagePullPolicy: Always

          ports:

            - containerPort: 8443

              protocol: TCP

          args:

            - --auto-generate-certificates

            - --namespace=kubernetes-dashboard

            # Uncomment the following line to manually specify Kubernetes API server Host

            # If not specified, Dashboard will attempt to auto discover the API server and connect

            # to it. Uncomment only if the default does not work.

            # - --apiserver-host=http://my-address:port

          volumeMounts:

            - name: kubernetes-dashboard-certs

              mountPath: /certs

              # Create on-disk volume to store exec logs

            - mountPath: /tmp

              name: tmp-volume

          livenessProbe:

            httpGet:

              scheme: HTTPS

              path: /

              port: 8443

            initialDelaySeconds: 30

            timeoutSeconds: 30

          securityContext:

            allowPrivilegeEscalation: false

            readOnlyRootFilesystem: true

            runAsUser: 1001

            runAsGroup: 2001

      volumes:

        - name: kubernetes-dashboard-certs

          secret:

            secretName: kubernetes-dashboard-certs

        - name: tmp-volume

          emptyDir: {}

      serviceAccountName: kubernetes-dashboard

      nodeSelector:

        "beta.kubernetes.io/os": linux

      # Comment the following tolerations if Dashboard must not be deployed on master

      tolerations:

        - key: node-role.kubernetes.io/master

          effect: NoSchedule

---

kind: Service

apiVersion: v1

metadata:

  labels:

    k8s-app: dashboard-metrics-scraper

  name: dashboard-metrics-scraper

  namespace: kubernetes-dashboard

spec:

  ports:

    - port: 8000

      targetPort: 8000

  selector:

    k8s-app: dashboard-metrics-scraper

---

kind: Deployment

apiVersion: apps/v1

metadata:

  labels:

    k8s-app: dashboard-metrics-scraper

  name: dashboard-metrics-scraper

  namespace: kubernetes-dashboard

spec:

  replicas: 1

  revisionHistoryLimit: 10

  selector:

    matchLabels:

      k8s-app: dashboard-metrics-scraper

  template:

    metadata:

      labels:

        k8s-app: dashboard-metrics-scraper

      annotations:

        seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'

    spec:

      containers:

        - name: dashboard-metrics-scraper

          image: kubernetesui/metrics-scraper:v1.0.3

          ports:

            - containerPort: 8000

              protocol: TCP

          livenessProbe:

            httpGet:

              scheme: HTTP

              path: /

              port: 8000

            initialDelaySeconds: 30

            timeoutSeconds: 30

          volumeMounts:

          - mountPath: /tmp

            name: tmp-volume

          securityContext:

            allowPrivilegeEscalation: false

            readOnlyRootFilesystem: true

            runAsUser: 1001

            runAsGroup: 2001

      serviceAccountName: kubernetes-dashboard

      nodeSelector:

        "beta.kubernetes.io/os": linux

      # Comment the following tolerations if Dashboard must not be deployed on master

      tolerations:

        - key: node-role.kubernetes.io/master

          effect: NoSchedule

      volumes:

        - name: tmp-volume

          emptyDir: {}

Then apply it:

kubectl apply -f recommended.yaml

Check the dashboard rollout:

kubectl get pod -n kubernetes-dashboard
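It is also worth confirming that the Service is exposed on the expected NodePort (31001 in this manifest):

kubectl get svc -n kubernetes-dashboard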

###############################################

Next, run the following on master1:

#Create an admin service account for the Dashboard

kubectl create serviceaccount dashboard-admin -n kube-system

#Bind the new service account to the cluster-admin role

kubectl create clusterrolebinding dashboard-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin

#Find the name of the token secret created for that service account

kubectl get secrets -n kube-system | grep dashboard

#Show the token details

kubectl describe secrets -n kube-system $(kubectl get secrets -n kube-system | grep dashboard |awk '{print $1}')
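#Shortcut (a sketch that relies on the token secret Kubernetes 1.20 still creates
#automatically for each service account): print the raw token directly
kubectl -n kube-system get secret \
  $(kubectl -n kube-system get sa dashboard-admin -o jsonpath='{.secrets[0].name}') \
  -o jsonpath='{.data.token}' | base64 -d; echo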

That completes the setup; you can now log in to the dashboard with the token shown above.

Note: open the master1 address in Firefox (or any browser that lets you accept the dashboard's self-signed certificate). The URL must start with https and end with the NodePort configured in recommended.yaml, which in this example is 31001.

In this example the dashboard address is https://192.168.3.100:31001

Thanks for reading!
