Installing Kubernetes and Deploying Services

I. k8s Cluster Installation

1. Preparation

  • Configure the Linux environment
# Disable the firewall
sudo systemctl stop firewalld
sudo systemctl disable firewalld
# Disable selinux (the sed makes it permanent after reboot; setenforce relaxes it immediately)
sudo sed -i 's/enforcing/disabled/' /etc/selinux/config
sudo setenforce 0
# Disable the swap partition
sudo sed -ri 's/.*swap.*/#&/' /etc/fstab
sudo swapoff -a
# Set the hostname
sudo hostnamectl set-hostname <hostname>
# Add hosts entries
cat << EOF | sudo tee -a /etc/hosts
19.16.1.21 iids01
19.16.1.22 iids02
19.16.1.23 iids03
19.16.1.24 iids04
19.16.1.25 iids05
EOF
# Pass bridged IPv4 traffic to the iptables chains
cat << EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
# Load the bridge netfilter module and apply the settings
sudo modprobe br_netfilter
sudo sysctl --system
# Sync the clock
sudo yum install ntpdate -y
sudo ntpdate time.windows.com
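A quick sanity check before moving on (optional):
# swap should show 0 total/used, selinux should no longer report Enforcing, and the sysctl should print 1
free -m | grep -i swap
getenforce
sysctl net.bridge.bridge-nf-call-iptables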
  • Install docker
# Add the docker-ce yum repo
sudo wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
# Install a pinned version
sudo yum -y install docker-ce-18.06.1.ce-3.el7
# Enable at boot and start
sudo systemctl enable docker && sudo systemctl start docker
# Check the docker version
sudo docker --version
  • Add the Aliyun YUM sources
# Configure the Aliyun docker registry mirror
cat << EOF | sudo tee /etc/docker/daemon.json
{
	"registry-mirrors": ["https://b9pmyelo.mirror.aliyuncs.com"]
}
EOF
# Restart docker
sudo systemctl restart docker
# Add the domestic (Aliyun) k8s yum repo
cat << EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
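To confirm the new repo works and to see which package versions it offers (optional):
sudo yum makecache
sudo yum list kubelet --showduplicates | grep 1.18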

II. Installing the k8s Components

# Install the components
sudo yum install -y kubelet-1.18.0 kubeadm-1.18.0 kubectl-1.18.0
# Enable kubelet at boot, but do not start it manually; kubeadm configures and starts it during init
sudo systemctl enable kubelet

III. Deploying the k8s Master Node

1. Initialize the master node

# --apiserver-advertise-address : the master node's IP
# --image-repository            : registry to pull the control-plane images from
# --kubernetes-version          : must match the installed packages (v1.18.0 here)
# --service-cidr                : IP range for service virtual IPs
# --pod-network-cidr            : IP range for pod IPs (must match the flannel "Network" below)
sudo kubeadm init \
--apiserver-advertise-address=19.16.1.21 \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version v1.18.0 \
--service-cidr=10.96.0.0/12 \
--pod-network-cidr=10.244.0.0/16
# On success it prints:
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.40.129:6443 --token tc07ud.840lwd2lmqt3wi9n \
    --discovery-token-ca-cert-hash sha256:0ca310d43b5dd0954c893800c99b6894f809fa185e4882a40442e24ba750a47a 
# Be sure to run the commands kubeadm printed to finish setting up the master; skipping them causes: error: no configuration has been provided, try setting KUBERNETES_MASTER environment variable
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
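A quick check that kubectl can now reach the control plane (optional):
kubectl cluster-info
kubectl get pods -n kube-system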

2. Join worker nodes to the cluster

sudo kubeadm join 192.168.40.129:6443 --token tc07ud.840lwd2lmqt3wi9n \
--discovery-token-ca-cert-hash sha256:0ca310d43b5dd0954c893800c99b6894f809fa185e4882a40442e24ba750a47a

Note: the join token is valid for 24 hours; once it expires, generate a new join command with:

sudo kubeadm token create --print-join-command

Check the cluster status

sudo kubectl get nodes
# cluster status output
NAME         STATUS     ROLES    AGE     VERSION
k8s-master   NotReady   master   12m     v1.18.0
k8s-node1    NotReady   <none>   6m17s   v1.18.0
k8s-node2    NotReady   <none>   6m9s    v1.18.0

Note: if the command fails with the following error: error: no configuration has been provided, try setting KUBERNETES_MASTER environment variable

vim /etc/profile
# append
export KUBECONFIG=/etc/kubernetes/admin.conf
# apply
source /etc/profile

IV. Installing the flannel Network Plugin

1. Download the flannel plugin

Until a pod network is installed the nodes stay NotReady; the flannel network component makes cluster networking work. The flannel images are hosted abroad, so if you cannot pull them, download the image file and load it into docker. Download locations:
Baidu netdisk: https://pan.baidu.com/s/1O4T80pJGuDrbY_-rEQLyUQ  extraction code: phhv
Official releases: https://github.com/coreos/flannel/releases (download the matching version)

# Load the image file
sudo docker load -i flannel.tar                    # image from the Baidu netdisk
sudo docker load < flanneld-v0.12.0-amd64.docker   # image from the official release
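Confirm the image is now in the local docker cache (optional):
sudo docker images | grep flannel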

2. Edit kube-flannel.yml

Edit kube-flannel.yml and change the image name to match the image you loaded

---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
  - configMap
  - secret
  - emptyDir
  - hostPath
  allowedHostPaths:
  - pathPrefix: "/etc/cni/net.d"
  - pathPrefix: "/etc/kube-flannel"
  - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups: ['extensions']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.13.0  # must match the loaded image
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.13.0  # must match the loaded image
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg

3. Start the flannel pods

sudo kubectl apply -f kube-flannel.yml
# Check the nodes; once all are Ready, the cluster is up
sudo kubectl get nodes
NAME         STATUS   ROLES    AGE   VERSION
k8s-master   Ready    master   16h   v1.18.0
k8s-node1    Ready    <none>   15h   v1.18.0
k8s-node2    Ready    <none>   15h   v1.18.0
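You can also confirm the flannel DaemonSet itself (optional); each node should run one Running pod:
sudo kubectl get pods -n kube-system -l app=flannel -o wide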

V. k8s Certificate Validity

1. Check the certificate expiry

sudo kubeadm alpha certs check-expiration
CERTIFICATE                EXPIRES                  RESIDUAL TIME   CERTIFICATE AUTHORITY   EXTERNALLY MANAGED
admin.conf                 Sep 04, 2021 08:45 UTC   99y                                     no      
apiserver                  Sep 04, 2021 08:45 UTC   99y             ca                      no      
apiserver-etcd-client      Sep 04, 2021 08:45 UTC   99y             etcd-ca                 no      
apiserver-kubelet-client   Sep 04, 2021 08:45 UTC   99y             ca                      no      
controller-manager.conf    Sep 04, 2021 08:45 UTC   99y                                     no      
etcd-healthcheck-client    Sep 04, 2021 08:45 UTC   99y             etcd-ca                 no      
etcd-peer                  Sep 04, 2021 08:45 UTC   99y             etcd-ca                 no      
etcd-server                Sep 04, 2021 08:45 UTC   99y             etcd-ca                 no      
front-proxy-client         Sep 04, 2021 08:45 UTC   99y             front-proxy-ca          no      
scheduler.conf             Sep 04, 2021 08:45 UTC   99y                                     no      
CERTIFICATE AUTHORITY   EXPIRES                  RESIDUAL TIME   EXTERNALLY MANAGED
ca                      Sep 21, 2030 09:09 UTC   9y              no      
etcd-ca                 Sep 21, 2030 09:09 UTC   9y              no      
front-proxy-ca          Sep 21, 2030 09:09 UTC   9y              no      

2. Change the certificate validity

  • Check the kubeadm version
sudo kubeadm version
kubeadm version: &version.Info{Major:"1", Minor:"18", GitVersion:"v1.18.0", GitCommit:"9e991415386e4cf155a24b1da15becaa390438d8", GitTreeState:"archive", BuildDate:"2020-09-28T08:21:05Z", GoVersion:"go1.13.6", Compiler:"gc", Platform:"linux/amd64"}
  • Download the matching source version from github: https://github.com/kubernetes/kubernetes/releases
  • Baidu netdisk: https://pan.baidu.com/s/1F41mjXfOteWJScRdAv1fEA  extraction code: 3jg0
  • Unpack the source
sudo tar -zxvf v1.18.0.tar.gz
sudo mv kubernetes-1.18.0 kubernetes
cd kubernetes
  • Edit the expiry settings in the source
sudo vi ./staging/src/k8s.io/client-go/util/cert/cert.go

// In this function the line NotAfter: now.Add(duration365d * 10).UTC()
// sets the default validity of 10 years; change it to 100 years
// search for it with /NotAfter
func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, error) {
        now := time.Now()
        tmpl := x509.Certificate{
                SerialNumber: new(big.Int).SetInt64(0),
                Subject: pkix.Name{
                        CommonName:   cfg.CommonName,
                        Organization: cfg.Organization,
                },
                NotBefore:             now.UTC(),
                // NotAfter:              now.Add(duration365d * 10).UTC(),
                NotAfter:              now.Add(duration365d * 100).UTC(),
                KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
                BasicConstraintsValid: true,
                IsCA:                  true,
        }
        certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &tmpl, &tmpl, key.Public(), key)
        if err != nil {
                return nil, err
        }
        return x509.ParseCertificate(certDERBytes)
}
sudo vi ./cmd/kubeadm/app/constants/constants.go
// The constant is CertificateValidity; change it to 100 years
const (
      // KubernetesDir is the directory Kubernetes owns for storing various configuration files
      KubernetesDir = "/etc/kubernetes"
      // ManifestsSubDirName defines directory name to store manifests
      ManifestsSubDirName = "manifests"
      // TempDirForKubeadm defines temporary directory for kubeadm
      // should be joined with KubernetesDir.
      TempDirForKubeadm = "tmp"
      // CertificateValidity defines the validity for all the signed certificates generated by kubeadm
      // CertificateValidity = time.Hour * 24 * 365
      CertificateValidity = time.Hour * 24 * 365 * 100
      // CACertAndKeyBaseName defines certificate authority base name
      CACertAndKeyBaseName = "ca"
      // CACertName defines certificate name
      CACertName = "ca.crt"
      // CAKeyName defines certificate name
      CAKeyName = "ca.key"
      // ... (remaining constants unchanged)
)

3. Build the source

  • Check the kube-cross TAG version
sudo cat ./build/build-image/cross/VERSION  
v1.13.8-1

Here the code can be built with the official builder container k8s.gcr.io/kube-cross:v1.13.6-1 (at the time only 1.13.6 was available, not 1.13.8).

  • Pull the matching build-environment docker image
sudo docker pull gcrcontainer/kube-cross:v1.13.6-1

# If it cannot be pulled, use the Aliyun mirror: docker pull registry.aliyuncs.com/google_containers/kube-cross:v1.13.6-1

  • Build

    # -v [source checkout dir]:/go/src/k8s.io/kubernetes
    sudo docker run --rm -v /root/kubernetes:/go/src/k8s.io/kubernetes -it gcrcontainer/kube-cross:v1.13.6-1 bash
    cd /go/src/k8s.io/kubernetes
    # build kubeadm
    make all WHAT=cmd/kubeadm GOFLAGS=-v
    # build kubelet
    make all WHAT=cmd/kubelet GOFLAGS=-v
    # build kubectl
    make all WHAT=cmd/kubectl GOFLAGS=-v
    # leave the container
    exit
    # Back on the host: the build output is under _output/bin/kubeadm,
    # where bin is a symlink; the real path is _output/local/bin/linux/amd64/kubeadm
    cd /root/kubernetes
    mv /usr/bin/kubeadm /usr/bin/kubeadm_backup
    cp _output/local/bin/linux/amd64/kubeadm /usr/bin/kubeadm
    chmod +x /usr/bin/kubeadm
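    # Optional sanity check: the binary on PATH should now be the rebuilt one
    kubeadm version -o short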
    

4. Renew the certificates

# Renew all certificates
sudo kubeadm alpha certs renew all
# Check again: every signed certificate now shows 100 years (the CAs keep their original validity)
sudo kubeadm alpha certs check-expiration
CERTIFICATE                EXPIRES                  RESIDUAL TIME   CERTIFICATE AUTHORITY   EXTERNALLY MANAGED
admin.conf                 Sep 04, 2120 08:45 UTC   99y                                     no      
apiserver                  Sep 04, 2120 08:45 UTC   99y             ca                      no      
apiserver-etcd-client      Sep 04, 2120 08:45 UTC   99y             etcd-ca                 no      
apiserver-kubelet-client   Sep 04, 2120 08:45 UTC   99y             ca                      no      
controller-manager.conf    Sep 04, 2120 08:45 UTC   99y                                     no      
etcd-healthcheck-client    Sep 04, 2120 08:45 UTC   99y             etcd-ca                 no      
etcd-peer                  Sep 04, 2120 08:45 UTC   99y             etcd-ca                 no      
etcd-server                Sep 04, 2120 08:45 UTC   99y             etcd-ca                 no      
front-proxy-client         Sep 04, 2120 08:45 UTC   99y             front-proxy-ca          no      
scheduler.conf             Sep 04, 2120 08:45 UTC   99y                                     no      
CERTIFICATE AUTHORITY   EXPIRES                  RESIDUAL TIME   EXTERNALLY MANAGED
ca                      Sep 21, 2030 09:09 UTC   9y              no      
etcd-ca                 Sep 21, 2030 09:09 UTC   9y              no      
front-proxy-ca          Sep 21, 2030 09:09 UTC   9y              no

VI. Installing the Dashboard

1. Deploy the dashboard from yaml

sudo kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.3/aio/deploy/recommended.yaml
# Offline copy of the manifest (recommended.yaml):
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard

---

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf: ""

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque

---

kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard

---

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
    # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
    # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
        - name: kubernetes-dashboard
          image: kubernetesui/dashboard:v2.0.3
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
              # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
    spec:
      containers:
        - name: dashboard-metrics-scraper
          image: kubernetesui/metrics-scraper:v1.0.4
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
          - mountPath: /tmp
            name: tmp-volume
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}

2. Set the service type to NodePort

kubectl  patch svc kubernetes-dashboard -n kubernetes-dashboard -p '{"spec":{"type":"NodePort","ports": [{"port":443,"targetPort":8443,"nodePort":30443}]}}'
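Verify the change (optional); TYPE should now be NodePort, mapping 443:30443:
kubectl get svc kubernetes-dashboard -n kubernetes-dashboard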

3. Configure authentication

  • Create dashboard-adminuser.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
  • Create the login user
sudo kubectl apply -f dashboard-adminuser.yaml
  • Get the admin-user account's token
sudo kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep admin-user | awk '{print $1}')

Name:         admin-user-token-52c8r
Namespace:    kubernetes-dashboard
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: admin-user
              kubernetes.io/service-account.uid: 6e48fddd-37c9-4b2c-8565-5127a91d2ea2
Type:  kubernetes.io/service-account-token
Data
====
ca.crt:     1025 bytes
namespace:  20 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6Im9UcEZ4dmtoRTJxWlI0U2YtbDkzQjVQSGtKSTF6Q1EtVDUyUDViSXdDc3MifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLTUyYzhyIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI2ZTQ4ZmRkZC0zN2M5LTRiMmMtODU2NS01MTI3YTkxZDJlYTIiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZXJuZXRlcy1kYXNoYm9hcmQ6YWRtaW4tdXNlciJ9.r27xXvDjQt1hhbg8IfKxHKD0ZbC4gGqnkSL0HRwUsss-GiSAXKNtbkDt-upVNI00gBuqKlHbUKMQOt6AYWSvFmuYBeSlzuGfkh8awN3dSg7ryaVXKyhiuEtufkLIEOlCx5d1RAsruWo-3SryZX6acp1YDZSb-Xmu1JvxSd85LPE4c8MG9nqSDFwKRtR93VHQTXzwdcEhCb_H-sWJcAP4LSqzg1pFHvi26ZLsWeZwgMuihDeQeRapDJ5MHMvKiuzMiGwmvRSH8RKRbS9gB0A3uRHaOazVeFdfRjFEYVwrM8WVuD8BLhVxyg6stXBpdgkyqhe0SRhF32vAOake-r0BAQ
  • In a browser, open https://<any-node-IP>:30443 and paste in the token

  • You are now logged in to the dashboard


VII. Installing the Harbor Private Image Registry

1. Install docker-compose

# Install docker-ce-18.06.1.ce-3.el7 first (see above)
# Download docker-compose
# link: https://pan.baidu.com/s/1luM8rj6InNCddY9xCKo48Q  extraction code: l91z
# Upload it to /usr/local/bin
# Make docker-compose executable
sudo chmod +x /usr/local/bin/docker-compose
# Verify the install
docker-compose --version

2. Install Harbor

  • Download the Harbor archive

    Link: https://pan.baidu.com/s/1W0eawaqMmq3ijx-jvrQqXQ  extraction code: acby

tar -xzf harbor-offline-installer-v2.1.0.tgz
# Edit the config file
vi harbor.yml
hostname: the host's external IP (it must be the external address, otherwise docker clients cannot log in; this cost a day of debugging)
port: the listening port
harbor_admin_password: the default admin password
# Comment out the https block:
# https related config
#https:
  # https port for harbor, default is 443
  #port: 443
  # The path of cert and key files for nginx
  #certificate: /your/certificate/path
  #private_key: /your/private/key/path
  
# Install Harbor
./prepare
./install.sh
# Manage Harbor with docker-compose
docker-compose up -d     # start
docker-compose stop      # stop
docker-compose restart   # restart
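Check that all harbor containers came up (optional):
docker-compose ps   # every container should show State "Up"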

3. Configure docker to trust the private registry

# Add the Harbor address to Docker's trusted (insecure) registry list
vi /etc/docker/daemon.json
# add this key, keeping any existing keys (e.g. registry-mirrors) and valid JSON:
"insecure-registries": ["http://<IP>:<port>"]
# Restart Docker
sudo systemctl daemon-reload
sudo systemctl restart docker
# Log in to harbor
docker login -u <user> -p <password> http://<IP>:<port>

4. Push images to the private registry

# Tag the source image
docker tag <image>:<tag> <registry-IP>:<port>/<project>/<image>:<tag>
# Push the image
docker push <registry-IP>:<port>/<project>/<image>:<tag>
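A concrete run-through (the registry address 192.168.1.100:8080 and the project name library are hypothetical placeholders):
# tag and push the stock nginx image to the private registry
docker pull nginx:1.19
docker tag nginx:1.19 192.168.1.100:8080/library/nginx:1.19
docker push 192.168.1.100:8080/library/nginx:1.19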

VIII. Deploying a Spring Cloud Microservice Project on k8s

1. Create a namespace

kubectl create namespace <namespace>   # e.g. kubectl create namespace iids

2. Create the private-registry image pull Secret

# The secret must live in the namespace the pods will run in (iids here), and its name must be
# a valid DNS-1123 name: lowercase letters, digits and '-' (so harbor-secret, not harbor_secret)
kubectl create secret docker-registry <secret-name> \
     --namespace=<namespace> \
     --docker-server=<harbor-address> \
     --docker-username=<user> \
     --docker-password=<password> \
     --docker-email=<any-email>
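Verify it (optional; the name harbor-secret matches the imagePullSecrets used in the manifests below):
kubectl get secret harbor-secret -n iids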

3. Deploy the microservices

(1) Choosing a deployment style
  • Microservices are deployed with one of two controllers and exposed in one of two ways, depending on the service type:

    1. Stateful services such as eureka, job schedulers, databases and other middleware are deployed with a StatefulSet controller; all other, stateless, microservices use a Deployment controller.

    2. Services consumed only inside k8s are exposed through a ClusterIP type Service; services that must be reachable from outside are exposed through a NodePort type Service.

(2) Deploying a stateful service
  • Using the eureka service as an example

    1. Adjust the eureka configuration file
    server:
      port: 10000
    spring:
      application:
        name: iids-msa-eureka
    eureka:
      server:
        # disable self-preservation
        enable-self-preservation: false
        use-read-only-response-cache: false
        # automatic eviction interval
        eviction-interval-timer-in-ms: 5000
      client:
        registry-fetch-interval-seconds: 5
        # the registry's job is to maintain service instances; false = do not fetch the registry
        fetch-registry: true
        # this application is itself the registry; false = do not register itself with the registry
        register-with-eureka: true
        service-url:
          defaultZone: ${EUREKA_SERVER:http://127.0.0.1:${server.port}/eureka/}
      instance:
        hostname: ${EUREKA_INSTANCE_HOSTNAME:${spring.application.name}}
        lease-renewal-interval-in-seconds: 5
        lease-expiration-duration-in-seconds: 10
        instance-id: ${EUREKA_INSTANCE_HOSTNAME:${spring.application.name}}:${server.port}@${random.long(1000000,9999999)}
    
    2. Write iids-eureka.yml with the Service and StatefulSet
    #service
    ---
    apiVersion: v1
    kind: Service
    metadata:
      namespace: iids
      name: iids-eureka
      labels:
        app: iids-eureka
    spec:
      type: NodePort
      ports:
        - port: 10000
          name: iids-eureka
          targetPort: 10000
      selector:
        app: iids-eureka
    ---
    #StatefulSet
    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      namespace: iids
      name: iids-eureka
    spec:
      serviceName: "iids-eureka"
      replicas: 2
      selector:
        matchLabels:
          app: iids-eureka
      template:
        metadata:
          labels:
            app: iids-eureka
        spec:
          imagePullSecrets:
            - name: harbor-secret   # matches the image pull Secret created above
          containers:
            - name: iids-msa-eureka
              image: 123.56.18.37:25100/iids/iids-msa-eureka:1.0.0
              ports:
              - containerPort: 10000
              env:
                - name: MY_POD_NAME
                  valueFrom:
                    fieldRef:
                      fieldPath: metadata.name
                - name: EUREKA_SERVER
                  value:  "http://iids-eureka-0.iids-eureka.iids:10000/eureka/,http://iids-eureka-1.iids-eureka.iids:10000/eureka/"
                - name: EUREKA_INSTANCE_HOSTNAME
                  value: $(MY_POD_NAME).iids-eureka.iids   # dependent env vars use the $(VAR) syntax
                - name: ACTIVE
                  value: "-Dspring.profiles.active=k8s"
      podManagementPolicy: "Parallel"
    
    3. Apply the manifest
    kubectl apply -f iids-eureka.yml
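    # Optional: StatefulSet pods get stable ordinal names; expect iids-eureka-0 and iids-eureka-1
    kubectl get pods -n iids -l app=iids-eureka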
    
(3) Deploying an internal-only service
  1. Adjust the microservice configuration file

    eureka:
      client: # register this client with the eureka registry
        service-url:
          defaultZone: ${EUREKA_SERVER}
    
  2. Write iids-flat-config.yml with the Service and Deployment

    #service
    ---
    apiVersion: v1
    kind: Service
    metadata:
      namespace: iids
      name: iids-flat-config
      labels:
        app: iids-flat-config
    spec:
      type: ClusterIP
      ports:
        - port: 30400
          name: iids-flat-config
          targetPort: 30400
      selector:
        app: iids-flat-config
    ---
    #Deployment
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      namespace: iids
      name: iids-flat-config
    spec:
      replicas: 2
      selector:
        matchLabels:
          app: iids-flat-config
      template:
        metadata:
          labels:
            app: iids-flat-config
        spec:
          imagePullSecrets:
            - name: harbor-secret
          containers:
            - name: iids-flat-config
              image: 123.56.18.37:25100/iids/iids-flat-config:1.0.0
              # Image pull policy:
              #   Always       - always pull the image
              #   IfNotPresent - use the local image if present, otherwise pull
              #   Never        - only ever use the local image, never pull
              # If imagePullPolicy is omitted it defaults to IfNotPresent (Always when the tag is :latest)
              imagePullPolicy: Always
              ports:
              - containerPort: 30100
              env:
                - name: EUREKA_SERVER
                  value:  "http://iids-eureka-0.iids-eureka.iids:10000/eureka/,http://iids-eureka-1.iids-eureka.iids:10000/eureka/"
                - name: ACTIVE
                  value: "-Dspring.profiles.active=k8s"
    
(4) Deploying an externally accessible service
  1. Adjust the microservice configuration file

    eureka:
      client: # register this client with the eureka registry
        service-url:
          defaultZone: ${EUREKA_SERVER}
    
  2. Write iids-msa-flat-zuul.yml with the Service and Deployment

    #service
    ---
    apiVersion: v1
    kind: Service
    metadata:
      namespace: iids
      name: iids-msa-flat-zuul
      labels:
        app: iids-msa-flat-zuul
    spec:
      type: NodePort
      ports:
        - port: 10100
          name: iids-msa-flat-zuul
          targetPort: 10100
      selector:
        app: iids-msa-flat-zuul
    ---
    #Deployment
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      namespace: iids
      name: iids-msa-flat-zuul
    spec:
      replicas: 2
      selector:
        matchLabels:
          app: iids-msa-flat-zuul
      template:
        metadata:
          labels:
            app: iids-msa-flat-zuul
        spec:
          imagePullSecrets:
            - name: harbor-secret
          containers:
            - name: iids-msa-flat-zuul
              image: 123.56.18.37:25100/iids/iids-msa-flat-zuul:1.0.0
              imagePullPolicy: Always
              ports:
              - containerPort: 30100
              env:
                - name: EUREKA_SERVER
                  value:  "http://iids-eureka-0.iids-eureka.iids:10000/eureka/,http://iids-eureka-1.iids-eureka.iids:10000/eureka/"
                - name: ACTIVE
                  value: "-Dspring.profiles.active=k8s"
    
(5) Notes
  1. Image pull policy
containers:
- name: iids-flat-config
  image: 123.56.18.37:25100/iids/iids-flat-config:1.0.0
  imagePullPolicy: Always
# Image pull policy:
#   Always       - always pull the image
#   IfNotPresent - use the local image if present, otherwise pull
#   Never        - only ever use the local image, never pull
# If imagePullPolicy is omitted it defaults to IfNotPresent (Always when the tag is :latest)
  2. Adjusting the NodePort range (the default is 30000-32767)
# Edit the apiserver manifest
vim /etc/kubernetes/manifests/kube-apiserver.yaml
# add: - --service-node-port-range=1-65535
spec:
  containers:
  - command:
    - kube-apiserver
    - --service-node-port-range=1-65535  # adding this one line is all that is needed
    - --advertise-address=192.168.0.13
    - --allow-privileged=true
    - --authorization-mode=Node,RBAC
    - --client-ca-file=/etc/kubernetes/pki/ca.crt
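kube-apiserver runs as a static pod, so the kubelet restarts it automatically once the manifest is saved. Give it a moment, then confirm (optional):
kubectl get pod -n kube-system -l component=kube-apiserver
kubectl get pod -n kube-system -l component=kube-apiserver -o yaml | grep node-port-range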

IX. HPA: Dynamic Pod Scaling

1. Pod resource limits

(1) How resource limits are counted

Resources in k8s:
 CPU:
  A CPU with 2 cores and 2 threads per core is seen by the OS as 4 logical CPUs; in k8s, CPU limits are applied as slices of logical CPUs.
  That is, allocating one CPU to a container means allocating one logical CPU.
  A logical CPU can be subdivided further: one logical CPU is split into 1000 millicores, i.e. 1000 logical slices.
  A millicore can be understood as a logical slice of the CPU's time; each time slice is one millicore.
  So 500m means 500 millicores, i.e. 0.5 logical CPU.

 Memory:
  K, M, G, T, P, E        # these units use a factor of 1000
  Ki, Mi, Gi, Ti, Pi, Ei  # these use a factor of 1024

(2) Configuring limits in the manifest
containers:
- name: iids-flat-config
  image: 123.56.18.37:25100/iids/iids-flat-config:1.0.0
  imagePullPolicy: Always
  resources:
      requests:  # the requested amount, a.k.a. the soft limit
        cpu: "500m"
        memory: "512Mi"
      limits:    # the maximum, a.k.a. the hard limit
        cpu: "500m"
        memory: "512Mi"
# As a rule Limits >= Requests, and requests and limits should be configured together:
# with only requests and no limits, a Pod may end up consuming all of a node's resources.
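To see how the requests and limits of the pods scheduled on a node add up (optional; <node-name> is a placeholder):
kubectl describe node <node-name> | grep -A 8 "Allocated resources"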

For more detail, see https://www.cnblogs.com/wn1m/p/11291235.html

2. Deploy the metrics-server resource monitor

(1) Download the metrics-server manifests
# link: https://pan.baidu.com/s/1BGdi5cO6E72lIaaVBSPasQ  extraction code: ybv9  (the manifests below are already modified)
(2) Change the image

Swap the foreign image for a domestic mirror: open metrics-server\deploy\1.8+ and edit metrics-server-deployment.yaml

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: metrics-server
  namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    k8s-app: metrics-server
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  template:
    metadata:
      name: metrics-server
      labels:
        k8s-app: metrics-server
    spec:
      hostNetwork: true  # changed: use the host network
      serviceAccountName: metrics-server
      volumes:
      # mount in tmp so we can safely use from-scratch images and/or read-only containers
      - name: tmp-dir
        emptyDir: {}
      containers:
      - name: metrics-server
        image: registry.cn-hangzhou.aliyuncs.com/google_containers/metrics-server-amd64:v0.3.6  # changed to a domestic mirror
        imagePullPolicy: Always
        # add the following two arguments
        args:
          - --kubelet-insecure-tls
          - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP
        volumeMounts:
        - name: tmp-dir
          mountPath: /tmp
(3) Start metrics-server
# cd into the manifest directory
cd <manifest-directory>
# apply every manifest in it
kubectl apply -f .
# check
kubectl get pod -n kube-system
coredns-7ff77c879f-nvv8g            1/1     Running            10         18d
coredns-7ff77c879f-vbdlm            1/1     Running            9          18d
etcd-k8smaster                      1/1     Running            10         18d
kube-apiserver-k8smaster            1/1     Running            0          3h44m
kube-controller-manager-k8smaster   1/1     Running            11         18d
kube-flannel-ds-7mmvf               1/1     Running            2          18d
kube-flannel-ds-tp9vs               1/1     Running            12         18d
kube-proxy-k5sb7                    1/1     Running            2          18d
kube-proxy-zcbgz                    1/1     Running            9          18d
kube-scheduler-k8smaster            1/1     Running            11         18d
metrics-server-5f55b696bd-d279n     1/1     Running            0          39m 
# a Running metrics-server-xxx pod means metrics-server started successfully
# check cluster resource usage
kubectl top node                 # node resources
kubectl top pod -n <namespace>   # pod resources

3. Create an HPA for a microservice

(1) Write the HPA manifest

vim iids-app-config.yml

apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
  name: iids-app-config
  namespace: iids
spec:
  minReplicas: 2   # at least 2 replicas
  maxReplicas: 5   # at most 5 replicas
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: iids-app-config # the name of the Deployment created earlier
  metrics:
  - type: Resource
    resource:
      name: cpu
      targetAverageUtilization: 50  # scales on utilization; use targetAverageValue to scale on absolute usage
  - type: Resource
    resource:
      name: memory
      targetAverageUtilization: 50  # scales on utilization; use targetAverageValue to scale on absolute usage
(2) Start the HPA
# start the HPA
kubectl apply -f iids-app-config.yml
# check the HPA
kubectl get hpa -n iids
NAME              REFERENCE                    TARGETS           MINPODS   MAXPODS   REPLICAS   AGE
iids-app-config   Deployment/iids-app-config   36%/50%, 0%/50%   2         5         3          5h43m
  1. Scale up: when CPU or memory usage exceeds 50%, the HPA starts additional Pods (up to maxReplicas) according to its algorithm until demand is satisfied.
  2. Scale down: when CPU or memory usage falls below 50%, the HPA stops Pods (down to minReplicas). A quick way to exercise this is sketched below.
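A sketch for driving load at the service to watch the HPA react; the service name and port in the URL are assumptions, so point the loop at one of your own endpoints:
# generate load from a throwaway pod (URL is an example)
kubectl run load-generator -n iids --rm -it --image=busybox:1.28 --restart=Never \
  -- /bin/sh -c "while true; do wget -q -O- http://iids-app-config:30400/; done"
# in a second terminal, watch the replica count follow the load
kubectl get hpa iids-app-config -n iids -w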