I. Deployment Preparation
Four servers

Server           OS                                    Hostname
192.168.50.230   CentOS Linux release 7.9.2009 (Core)  k8s-master01
192.168.50.231   CentOS Linux release 7.9.2009 (Core)  k8s-master02
192.168.50.232   CentOS Linux release 7.9.2009 (Core)  k8s-node01
192.168.50.233   CentOS Linux release 7.9.2009 (Core)  k8s-node02
192.168.50.235   Virtual IP (no physical server)       keepalived VIP

II. System Initialization
1. Configure the hosts file on the k8s-master server

[root@k8s-master cyz]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.50.230 k8s-master01
192.168.50.231 k8s-master02
192.168.50.232 k8s-node01
192.168.50.233 k8s-node02
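
The same /etc/hosts entries should be present on every node. A minimal sketch for pushing the file from k8s-master01 to the other servers (assumes root SSH access between the nodes; adjust hostnames as needed):

# copy /etc/hosts to the remaining nodes
for host in k8s-master02 k8s-node01 k8s-node02; do
  scp /etc/hosts root@${host}:/etc/hosts
done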

The following steps must be performed on all four servers.
2. Configure the Aliyun yum repository

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

3. Configure iptables bridge parameters

cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
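
These parameters only take effect once the br_netfilter module is loaded and sysctl is re-read:

# load the bridge netfilter module and apply the new sysctl settings
modprobe br_netfilter
sysctl --system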

4. Disable swap

swapoff -a

Comment out the swap partition mount line in /etc/fstab

[root@k8s-master cyz]# cat /etc/fstab 

#
# /etc/fstab
# Created by anaconda on Tue Aug 24 13:54:39 2021
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/centos-root /                       xfs     defaults        0 0
UUID=54eea75f-31ae-4b4f-b946-b9f9dfbaeb9c /boot                   xfs     defaults        0 0
#/dev/mapper/centos-swap swap                    swap    defaults        0 0
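
Instead of editing the file by hand, the swap mount line can also be commented out with sed (a sketch; check the result with cat /etc/fstab afterwards):

# prefix any line that mounts swap with '#'
sed -ri 's/.*swap.*/#&/' /etc/fstab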

5. Disable the firewall

# Stop firewalld
systemctl stop firewalld.service
# Disable firewalld from starting at boot
systemctl disable firewalld.service

6. Disable SELinux

# Disable temporarily
setenforce 0
# Disable permanently (requires a reboot)
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config

III. Deploy docker-ce-19.03.13-3.el7.x86_64 (on all four servers)

# Install dependencies
yum install -y yum-utils device-mapper-persistent-data lvm2
# Add the docker-ce yum repository
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
# Install docker-ce
yum install -y docker-ce-19.03.13-3.el7.x86_64
# Enable docker at boot and start it
systemctl enable docker && systemctl start docker
# Configure the Aliyun registry mirror for docker
vim /etc/docker/daemon.json

{
  "registry-mirrors": ["https://zo120aja.mirror.aliyuncs.com"]
}
# Restart docker
systemctl restart docker
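
A quick sanity check that docker is running with the mirror configured:

# verify the docker version and the configured registry mirrors
docker --version
docker info | grep -A1 "Registry Mirrors"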

IV. Install kubeadm, kubectl, and kubelet (on all four servers)

yum install -y kubelet-1.18.0-0.x86_64 kubeadm-1.18.0-0.x86_64 kubectl-1.18.0-0.x86_64
# Enable kubelet at boot
systemctl enable kubelet.service
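
A quick check that the expected 1.18.0 binaries were installed:

# confirm the installed versions
kubeadm version -o short
kubelet --version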

V. Install haproxy and keepalived (only on the k8s-master servers)
1. Deploy haproxy

# Install haproxy
[root@k8s-master01 cyz]# yum install -y haproxy
# Configuration file: /etc/haproxy/haproxy.cfg
# full configuration options online.
#
#   https://www.haproxy.org/download/2.1/doc/configuration.txt
#   https://cbonte.github.io/haproxy-dconv/2.1/configuration.html
#
#---------------------------------------------------------------------

#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    # to have these messages end up in /var/log/haproxy.log you will
    # need to:
    #
    # 1) configure syslog to accept network log events.  This is done
    #    by adding the '-r' option to the SYSLOGD_OPTIONS in
    #    /etc/sysconfig/syslog
    #
    # 2) configure local2 events to go to the /var/log/haproxy.log
    #   file. A line like the following can be added to
    #   /etc/sysconfig/syslog
    #
    #    local2.*                       /var/log/haproxy.log
    #
    log         127.0.0.1 local2

    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon

    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats

#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000

#---------------------------------------------------------------------
# main frontend which proxys to the backends
#---------------------------------------------------------------------
frontend  kubernetes-apiserver
    mode tcp
    bind *:9443  ## listen on port 9443
    option   tcplog

    default_backend    kubernetes-apiserver

#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend kubernetes-apiserver
 mode        tcp
 balance     roundrobin
 server k8s-master01 192.168.50.230:6443 check
 server k8s-master02 192.168.50.231:6443 check
# Start haproxy on both master nodes
[root@k8s-master01 cyz]# systemctl restart haproxy && systemctl status haproxy
[root@k8s-master01 cyz]# systemctl enable haproxy
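
Once haproxy is running, both masters should be listening on port 9443 (the backends will stay down until the apiservers come up later, which is expected at this stage):

# confirm haproxy is listening on 9443
ss -lnt | grep 9443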

2. Deploy keepalived

[root@k8s-master01 cyz]# yum install -y keepalived
# Configure master01's /etc/keepalived/keepalived.conf
[root@k8s-master01 cyz]# vim /etc/keepalived/keepalived.conf

! Configuration File for keepalived

global_defs {
   script_user root
   enable_script_security

}

vrrp_script chk_haproxy {
    script "/bin/bash -c 'if [[ $(netstat -nlp | grep 9443) ]]; then exit 0; else systemctl stop keepalived;fi'"
    interval 2
    weight -2
}

vrrp_instance VI_1 {
  interface ens192

  state MASTER      # this node is the MASTER
  virtual_router_id 51
  priority 100      # initial priority
  nopreempt

  unicast_peer {

  }

  virtual_ipaddress {
    192.168.50.235   # virtual IP
  }

  authentication {
    auth_type PASS
    auth_pass password
  }

  track_script {
      chk_haproxy
  }
}

# Configure master02's /etc/keepalived/keepalived.conf
[root@k8s-master02 cyz]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   script_user root
   enable_script_security

}

vrrp_script chk_haproxy {
    script "/bin/bash -c 'if [[ $(netstat -nlp | grep 9443) ]]; then exit 0; else systemctl stop keepalived;fi'"
    interval 2
    weight -2
}

vrrp_instance VI_1 {
  interface ens192

  state BACKUP     # this node is the BACKUP
  virtual_router_id 51
  priority 90      # initial priority
  nopreempt

  unicast_peer {

  }

  virtual_ipaddress {
    192.168.50.235    # virtual IP
  }

  authentication {
    auth_type PASS
    auth_pass password
  }

  track_script {
      chk_haproxy
  }

}

Start keepalived on both master nodes

[root@k8s-master01 cyz]# systemctl start keepalived && systemctl status keepalived
[root@k8s-master01 cyz]# systemctl enable keepalived
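
After keepalived starts, the virtual IP should be bound to ens192 on whichever node currently holds the MASTER role (k8s-master01 by default):

# the VIP should appear only on the current MASTER
ip addr show ens192 | grep 192.168.50.235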

VI. Deploy the k8s cluster nodes
1. Initialize the master node
Create the initialization configuration file kubeadm-config.yaml

[root@k8s-master01 cyz]#vim kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: 192.168.50.235:9443   ### virtual IP + the port exposed by haproxy
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd          ### etcd data directory; the default can be kept
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers     ### image repository address
kind: ClusterConfiguration
kubernetesVersion: v1.18.0     ### kubernetes version
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12    # Service IP range; arbitrary, but must not conflict with existing network addresses
  podSubnet: 10.244.0.0/16       # pod network; must match the flannel network segment
scheduler: {}

---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs
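
Because kube-proxy is set to ipvs mode above, the IPVS kernel modules and userspace tools should be present on every node before initializing. A hedged prerequisite step (module names as used on CentOS 7's 3.10 kernel):

# install ipvs tooling and load the required kernel modules (run on all nodes)
yum install -y ipset ipvsadm
modprobe ip_vs
modprobe ip_vs_rr
modprobe ip_vs_wrr
modprobe ip_vs_sh
modprobe nf_conntrack_ipv4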

2. Initialize the cluster

# tee is appended here to save the initialization log to kubeadm-init.log for later reference (optional).
kubeadm init --config=kubeadm-config.yaml --upload-certs | tee kubeadm-init.log
# This command uses the configuration file created above; the --upload-certs flag uploads the control-plane certificates so they are distributed automatically when the other master joins later.

3. Configure kubectl

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
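
If initialization succeeded, kubectl should now reach the cluster through the VIP:

# basic sanity check; the master reports NotReady until the network plugin is deployed
kubectl get nodes
kubectl cluster-info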

4. Join the other master node to the cluster

 [root@k8s-master01 cyz]# kubeadm join 192.168.50.235:9443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:ec7ec903b411378a41d55c9b79039209b4db5c746ee5d4bdef535627696fe148 --control-plane --certificate-key 5f34ee62670931d4881c1d2aa0b63a73fb0e7c82141dc6218ca36326bd9a2248
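
The token and certificate key shown above come from the kubeadm init output, and the uploaded certificates expire after roughly two hours. If they are no longer valid, fresh values can be generated on the first master (the output will differ from the example above):

# print a fresh join command for worker nodes
kubeadm token create --print-join-command
# re-upload the control-plane certificates and print a new certificate key
kubeadm init phase upload-certs --upload-certs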

5. Join the worker nodes

kubeadm join 192.168.50.235:9443 --token 0art7m.ojz99obe6n873ed2 --discovery-token-ca-cert-hash sha256:ec7ec903b411378a41d55c9

VII. Deploy the network plugin
Deploy the flannel network by creating kube-flannel.yml

[root@k8s-master01 system]# vim  kube-flannel.yml 
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
    - configMap
    - secret
    - emptyDir
    - hostPath
  allowedHostPaths:
    - pathPrefix: "/etc/cni/net.d"
    - pathPrefix: "/etc/kube-flannel"
    - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
rules:
  - apiGroups: ['extensions']
    resources: ['podsecuritypolicies']
    verbs: ['use']
    resourceNames: ['psp.flannel.unprivileged']
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes/status
    verbs:
      - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-amd64
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: beta.kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: beta.kubernetes.io/arch
                    operator: In
                    values:
                      - amd64
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.12.0-amd64
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.12.0-amd64
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
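
Apply the manifest on the first master and wait for the flannel pods to reach Running; the nodes should then report Ready:

# deploy flannel and check pod and node status
kubectl apply -f kube-flannel.yml
kubectl get pods -n kube-system -o wide
kubectl get nodes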

Check the etcd cluster status

kubectl -n kube-system exec etcd-k8s-master01 -- etcdctl --endpoints=https://192.168.50.230:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/server.crt --key=/etc/kubernetes/pki/etcd/server.key endpoint health