As Kubernetes and CentOS keep evolving, installation guides found elsewhere online tend to run into problems. This article lists every step as a script that runs cleanly from start to finish on a fresh CentOS Stream server and completes the Kubernetes installation, working around the various issues encountered along the way.

Before installing, it is assumed that an NFS server has already been configured; if not, refer to the script in Appendix 1 at the end of this article.

The Kubernetes installation script is as follows:

#!/bin/bash

# Get current path of the script

SOURCE="$0"
while [ -h "$SOURCE" ]; do
    DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
    SOURCE="$(readlink "$SOURCE")"
    [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE"
done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"

# Source the general helper functions

if test -f $DIR/centos-functions.sh; then
    source $DIR/centos-functions.sh
fi

# Source the release-dependent functions

REDHAT_REL=$(get_redhat_release)
if test -f $DIR/centos$REDHAT_REL-functions.sh; then
    source $DIR/centos$REDHAT_REL-functions.sh
fi

# remove podman

if test -n "$(rpm -qa | grep podman)"; then
   yum remove -y podman || error_exit "Failed to remove podman!"
fi

# close swap
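# kubelet will not run with swap enabled, so swap is turned off here and the swap entry
# in /etc/fstab is commented out so that it stays off after a reboot.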

if test -n "$(swapon -s)"; then
   swapoff -a || error_exit "Failed to close the swap!"
   sed -i 's/.*swap.*/#&/' /etc/fstab || error_exit "Failed to close the swap!"
fi

# disable selinux

if [ "$(sestatus | grep 'SELinux status' | awk '{print $NF}')" != "disabled" ]; then
   setenforce 0 || error_exit "Failed to disable selinux!"
   SELINUX_LINE_NO=$(grep -n "^SELINUX=" /etc/selinux/config | awk -F ':' '{printf $1}')
   sed -i.bak "${SELINUX_LINE_NO}c SELINUX=disabled" /etc/selinux/config
fi

# disable firewalld

systemctl stop firewalld.service || error_exit "Failed to stop firewalld!"
systemctl disable firewalld.service || error_exit "Failed to disable firewalld!"

if test -z "$(rpm -qa docker-ce)"; then
   #  systemctl stop docker
   #  yum erase docker \
   #                  docker-client \
   #                  docker-client-latest \
   #                  docker-common \
   #                  docker-latest \
   #                  docker-latest-logrotate \
   #                  docker-logrotate \
   #                  docker-selinux \
   #                  docker-engine-selinux \
   #                  docker-engine \
   #                  docker-ce
   #  find /etc/systemd -name '*docker*' -exec rm -f {} \;
   #  find /lib/systemd -name '*docker*' -exec rm -f {} \;
   #  rm -rf /var/lib/docker
   #  rm -rf /var/run/docker
    yum install -y yum-utils  device-mapper-persistent-data lvm2  || error_exit "Failed to install docker!"
    yum-config-manager \
        --add-repo \
        https://download.docker.com/linux/centos/docker-ce.repo
    yum install docker-ce -y || error_exit "Failed to install docker!"
    systemctl start docker
    systemctl enable docker
    docker version 
fi

# setup kubernetes.repo

cat << __EOF__ > /etc/yum.repos.d/kubernetes.repo || error_exit "Failed to create kubernetes repo!"
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
__EOF__ || error_exit "Failed to create kubernetes repo!"

# modify docker daemon.json
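# docker and kubelet must agree on the cgroup driver: kubelet is set to systemd further
# down, so docker is switched from its default cgroupfs driver here, and an Aliyun
# registry mirror is added to speed up image pulls.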

DOCKER_DAEMON_FILE="/etc/docker/daemon.json"
if test ! -f $DOCKER_DAEMON_FILE; then
   cat << __EOF__ > $DOCKER_DAEMON_FILE || error_exit "Failed to create docker daemon config file!"
{
   "exec-opts": ["native.cgroupdriver=systemd"],
   "registry-mirrors" : ["https://agsw1hla.mirror.aliyuncs.com"]
}
__EOF__ || error_exit "Failed to create docker daemon config file!"
else
   if test -f ${DOCKER_DAEMON_FILE}.bak; then
      rm -f ${DOCKER_DAEMON_FILE}.bak || error_exit "Failed to delete the BACKUP file of docker daemon config!"
   fi
   cp -f ${DOCKER_DAEMON_FILE} ${DOCKER_DAEMON_FILE}.bak
   DRIVER_LINE_NO=$(grep -n "native.cgroupdriver=" < $DOCKER_DAEMON_FILE | awk -F ":" {'printf $1'})
   REGISTRY_MIRRORS_LINE_NO=$(grep -n "registry-mirrors" < $DOCKER_DAEMON_FILE | awk -F ":" {'printf $1'})
   END_LINE_NO=$(grep -n "}" < $DOCKER_DAEMON_FILE | tail -n 1 | awk -F ":" {'printf $1'})
   if test -z "$DRIVER_LINE_NO"; then
      sed -i "${END_LINE_NO}i , \"exec-opts\": [\"native.cgroupdriver=systemd\"]" $DOCKER_DAEMON_FILE
   else
      sed -i "${DRIVER_LINE_NO}c \"exec-opts\": [\"native.cgroupdriver=systemd\"]," $DOCKER_DAEMON_FILE
   fi
   if test -z "$REGISTRY_MIRRORS_LINE_NO"; then
      sed -i "${END_LINE_NO}i , \"registry-mirrors\" : [\"https://agsw1hla.mirror.aliyuncs.com\"]" $DOCKER_DAEMON_FILE
   else
      sed -i "${REGISTRY_MIRRORS_LINE_NO}c , \"registry-mirrors\" : [\"https://agsw1hla.mirror.aliyuncs.com\"]" $DOCKER_DAEMON_FILE
   fi
fi

# Install k8s

yum install -y kubelet-1.23.5 kubeadm-1.23.5 kubectl-1.23.5 || error_exit "Failed to install kubernetes packages!"

# modify the kubeadm configuration file

KUBEADM_CONFIG_FILE="/usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf"
ARGS_LINE_NO=$(grep -n "KUBELET_KUBECONFIG_ARGS=" < $KUBEADM_CONFIG_FILE | awk -F ":" {'printf $1'})
if test -z "$ARGS_LINE_NO"; then
   error_exit "KUBELET_KUBECONFIG_ARGS not found in $KUBEADM_CONFIG_FILE!"
fi
GREP_RESULT=$(sed -n ${ARGS_LINE_NO}p $KUBEADM_CONFIG_FILE | grep "\-\-cgroup\-driver=")
if test -z "$GREP_RESULT"; then
   ARGS_NEW_LINE=$(grep "KUBELET_KUBECONFIG_ARGS=" $KUBEADM_CONFIG_FILE | sed "s/\"/ --cgroup-driver=systemd\"/2")
   sed -i.bak "${ARGS_LINE_NO}c $ARGS_NEW_LINE" $KUBEADM_CONFIG_FILE
fi

# modify the kubeadm flags env file (this file is generated by kubeadm init/join, so it
# is only patched when it already exists from a previous run)

KUBEADM_FLAGS_ENV_FILE="/var/lib/kubelet/kubeadm-flags.env"
if test -f $KUBEADM_FLAGS_ENV_FILE; then
   ARGS_LINE_NO=$(grep -n "KUBELET_KUBEADM_ARGS=" < $KUBEADM_FLAGS_ENV_FILE | awk -F ":" {'printf $1'})
   if test -n "$ARGS_LINE_NO"; then
      GREP_RESULT=$(sed -n ${ARGS_LINE_NO}p $KUBEADM_FLAGS_ENV_FILE | grep "\-\-cgroup\-driver")
      if test -z "$GREP_RESULT"; then
         ARGS_NEW_LINE=$(grep "KUBELET_KUBEADM_ARGS=" $KUBEADM_FLAGS_ENV_FILE | sed "s/\"/ --cgroup-driver=systemd\"/2")
         sed -i.bak "${ARGS_LINE_NO}c $ARGS_NEW_LINE" $KUBEADM_FLAGS_ENV_FILE
      fi
   fi
fi

# Start k8s

systemctl daemon-reload
systemctl restart docker
systemctl enable docker
systemctl enable kubelet
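# kubeadm init pins the cluster version to v1.23.5, pulls the control-plane images from
# the Aliyun mirror, and assigns 10.10.0.0/16 to Services and 10.18.0.0/16 to Pods; the
# Pod CIDR must not overlap the host network.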
kubeadm init --apiserver-advertise-address=0.0.0.0 \
    --apiserver-cert-extra-sans=127.0.0.1 \
    --image-repository=registry.aliyuncs.com/google_containers \
    --ignore-preflight-errors=all \
    --kubernetes-version=v1.23.5 \
    --service-cidr=10.10.0.0/16 \
    --pod-network-cidr=10.18.0.0/16
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
if test -z "$(kubectl cluster-info | grep 'Kubernetes control plane is running at')"; then
   echo "The Kubernetes control plane failed to come up, please check!"
else
   echo "The Kubernetes control plane is up and running!"
fi

# Allow master to be scheduled

kubectl taint nodes --all node-role.kubernetes.io/master-

# Download the following images and save into bundle

# docker pull docker.io/calico/cni:v3.22.1
# docker pull docker.io/calico/pod2daemon-flexvol:v3.22.1
# docker pull docker.io/calico/node:v3.22.1
# docker pull docker.io/calico/kube-controllers:v3.22.1
# docker save -o calico-cni-v3.22.1.tar docker.io/calico/cni:v3.22.1 
# docker save -o calico-pod2daemon-flexvol-v3.22.1.tar docker.io/calico/pod2daemon-flexvol:v3.22.1
# docker save -o calico-node-v3.22.1.tar docker.io/calico/node:v3.22.1
# docker save -o calico-kube-controllers-v3.22.1.tar docker.io/calico/kube-controllers:v3.22.1
# docker save -o nfs-client-provisioner.tar nfs-client-provisioner:latest
# scp -P 35022 root@104.238.131.240:/root/nfs-client-provisioner.tar .
# scp -P 35022 root@104.238.131.240:/root/calico-kube-controllers-v3.22.1.tar .
# scp -P 35022 root@104.238.131.240:/root/calico-node-v3.22.1.tar .
# scp -P 35022 root@104.238.131.240:/root/calico-pod2daemon-flexvol-v3.22.1.tar .
# scp -P 35022 root@104.238.131.240:/root/calico-cni-v3.22.1.tar .
# scp -P 35022 d:\downloads\calico-cni-v3.22.1.tar root@106.55.61.142:/root/bundles
# scp -P 35022 d:\downloads\calico-pod2daemon-flexvol-v3.22.1.tar root@106.55.61.142:/root/bundles
# scp -P 35022 d:\downloads\calico-node-v3.22.1.tar root@106.55.61.142:/root/bundles
# scp -P 35022 d:\downloads\calico-kube-controllers-v3.22.1.tar root@106.55.61.142:/root/bundles
# scp -P 35022 d:\downloads\nfs-client-provisioner.tar root@106.55.61.142:/root/bundles

# install Calico

kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml

# prepare NFS configurations

NFS_SERVER_ADDR=$(get_host_ip_addr)
NFS_PATH=$(showmount -e | grep $NFS_SERVER_ADDR | awk {'printf $1'})

if test ! -d $HOME/k8s/nfs; then
   mkdir -p $HOME/k8s/nfs
fi
cat <<- __EOF__ | tee $HOME/k8s/nfs/nfs-rbac.yaml &>/dev/null;
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-provisioner
  namespace: default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
   name: nfs-provisioner-runner
   namespace: default
rules:
   -  apiGroups: [""]
      resources: ["persistentvolumes"]
      verbs: ["get", "list", "watch", "create", "delete"]
   -  apiGroups: [""]
      resources: ["persistentvolumeclaims"]
      verbs: ["get", "list", "watch", "update"]
   -  apiGroups: ["storage.k8s.io"]
      resources: ["storageclasses"]
      verbs: ["get", "list", "watch"]
   -  apiGroups: [""]
      resources: ["events"]
      verbs: ["watch", "create", "update", "patch"]
   -  apiGroups: [""]
      resources: ["services", "endpoints"]
      verbs: ["get","create","list", "watch","update"]
   -  apiGroups: ["extensions"]
      resources: ["podsecuritypolicies"]
      resourceNames: ["nfs-provisioner"]
      verbs: ["use"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-provisioner
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
__EOF__
cat <<- __EOF__ | tee $HOME/k8s/nfs/nfs-deployment.yaml &>/dev/null;
kind: Deployment
apiVersion: apps/v1
metadata:
   name: nfs-client-provisioner
   namespace: default
spec:
   replicas: 1
   selector:
      matchLabels:
         app: nfs-client-provisioner
   strategy:
     type: Recreate
   template:
      metadata:
         labels:
            app: nfs-client-provisioner
      spec:
         serviceAccount: nfs-provisioner
         containers:
            -  name: nfs-client-provisioner
               image: quay.io/external_storage/nfs-client-provisioner:latest
               volumeMounts:
                 -  name: nfs-client-root
                    mountPath:  /persistentvolumes
               env:
                 -  name: PROVISIONER_NAME
                    value: fuseim.pri/ifs
                 -  name: NFS_SERVER
                    value: $NFS_SERVER_ADDR
                 -  name: NFS_PATH
                    value: $NFS_PATH
         volumes:
           - name: nfs-client-root
             nfs:
               server: $NFS_SERVER_ADDR
               path: $NFS_PATH
__EOF__
cat <<- __EOF__ | tee $HOME/k8s/nfs/nfs-sc.yaml &>/dev/null;
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: grafana-nfs
  namespace: default
provisioner: fuseim.pri/ifs
reclaimPolicy: Retain
__EOF__
kubectl apply -f $HOME/k8s/nfs/nfs-rbac.yaml
kubectl apply -f $HOME/k8s/nfs/nfs-sc.yaml
kubectl apply -f $HOME/k8s/nfs/nfs-deployment.yaml
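
After the script finishes, the NFS provisioner can be checked with a small test PVC. The sketch below is only an illustration: the claim name test-claim is made up here, while the StorageClass name grafana-nfs comes from nfs-sc.yaml above.

cat <<- __EOF__ | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-claim
spec:
  storageClassName: grafana-nfs
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
__EOF__
# The PVC should become Bound once the provisioner has created a matching PV
kubectl get pvc test-claim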

Appendix 1: NFS Server installation procedure

This script accepts four parameters (a usage example follows the list):

Parameter 1: the parent directory, which must already exist; defaults to "/mnt/vdb".

Parameter 2: the directory to export, located under the parent directory; it must be empty and is created automatically if it does not exist; defaults to "nfs", so with the defaults the full exported path is "/mnt/vdb/nfs".

Parameter 3: the export options; defaults to "rw,sync,root_squash".

Parameter 4: the allowed client source; defaults to the /24 subnet the host resides on.
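
For example, assuming the script has been saved as setup-nfs-server.sh (the file name is only an assumption) and that the helper functions listed at the end of this appendix are available to it, exporting /data/nfs read-write to the 192.168.1.0/24 subnet would look like this:

# arguments: parent directory, exported directory, export options, allowed clients
./setup-nfs-server.sh /data nfs rw,sync,no_root_squash 192.168.1.0/24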

#!/bin/bash

# Get parameters

if test -n "$1"; then
   NFS_MOUNT_DIR=$1
else
   NFS_MOUNT_DIR="/mnt/vdb"
fi
if test -n "$2"; then
   NFS_DIR=$2
else
   NFS_DIR="nfs"
fi
if test -n "$3"; then
   NFS_OPTIONS=$3
else
   NFS_OPTIONS="rw,sync,root_squash"
fi
if test -n "$4"; then
   NFS_SOURCE=$4
else
   NFS_SOURCE=$(get_host_ip_addr)/24
fi
NFS_EXPORTS_FILE=/etc/exports

# Check the directories

if test ! -d $NFS_MOUNT_DIR; then
    echo "Mount Directory is not exist : $NFS_MOUNT_DIR"
    exit 1
fi
if test -d $NFS_MOUNT_DIR/$NFS_DIR ; then
    if [ "$(ls -A $NFS_MOUNT_DIR/$NFS_DIR)" ]; then
        echo "NFS directory is not empty : $NFS_MOUNT_DIR/$NFS_DIR"
        exit 1
    fi
else
    mkdir $NFS_MOUNT_DIR/$NFS_DIR
    chmod 777 $NFS_MOUNT_DIR/$NFS_DIR
fi

# Install packages and setup services

echo "Setup services ..."
ensure_package_installed rpcbind || error_exit "Failed to install rpcbind!"
ensure_package_installed nfs-utils  || error_exit "Failed to install nfs-utils!"
systemctl enable rpcbind || error_exit "Failed to enable rpcbind!"
systemctl enable nfs-server || error_exit "Failed to enable nfs-server!"
systemctl start rpcbind || error_exit "Failed to start rpcbind!"
systemctl start nfs-server || error_exit "Failed to start nfs-server!"

# Configure the export definition

echo "Setup exports ..."
EXPORT_DEFINITION="$NFS_MOUNT_DIR/$NFS_DIR $NFS_SOURCE($NFS_OPTIONS)"
echo "Export : $EXPORT_DEFINITION"
if test  -f $NFS_EXPORTS_FILE; then
   EXIST_LINE_NO=$(grep -n "^$NFS_MOUNT_DIR\/$NFS_DIR" $NFS_EXPORTS_FILE | cut -d ":" -f 1)
   if test -z "$EXIST_LINE_NO"; then
      echo $EXPORT_DEFINITION >> $NFS_EXPORTS_FILE
   else
      sed -i "${EXIST_LINE_NO}c $EXPORT_DEFINITION" $NFS_EXPORTS_FILE
   fi
else
   echo $EXPORT_DEFINITION > $NFS_EXPORTS_FILE
fi

systemctl restart rpcbind || error_exit "Failed to start rpcbind!"
systemctl restart nfs-server || error_exit "Failed to start nfs-server!"

RESULT=$(showmount -e | egrep -n "^$NFS_MOUNT_DIR\/$NFS_DIR")
if test -n "$RESULT"; then
   echo "NFS export $NFS_MOUNT_DIR/$NFS_DIR is configured successfully."
   exit 0
else
   echo "Error occurs, please check."
   exit 1
fi

##########
# Functions (helper functions used by the scripts above, e.g. from centos-functions.sh)
##########
function get_redhat_release {
    echo $(cat /etc/redhat-release | sed -r 's/.*release ([0-9]+).*/\1/')
}

function get_host_ip_addr {
    echo $(ip addr | grep 'state UP' -A2 | tail -n1 | awk '{print $2}' | cut -f1 -d '/')
}

function ensure_package_installed {
    if test -z "$(rpm -qa $1)"; then
        yum install -y $1
        return $?
    else
        return 0
    fi
}

function error_exit {
  echo "$1" 1>&2
  exit 1
}

Appendix 2: Images for offline installation

The images that need to be installed offline are the four Calico images (v3.22.1) and the nfs-client-provisioner image.

The corresponding offline image bundles (Calico 3.22.1 and the NFS provisioner) are available from the CSDN download resources.
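
Assuming the image tarballs have been copied to /root/bundles on the target host, as in the scp commands shown earlier, a minimal sketch for loading them into the local Docker image cache is:

cd /root/bundles || exit 1
for f in calico-cni-v3.22.1.tar calico-pod2daemon-flexvol-v3.22.1.tar \
         calico-node-v3.22.1.tar calico-kube-controllers-v3.22.1.tar \
         nfs-client-provisioner.tar; do
    docker load -i "$f"
done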
