Environment

OS                  Hostname   IP address       Roles                 Spec
CentOS 7.7 minimal  master1    192.168.121.11   master, etcd, node    2C4G

Docker version: 18.09.5
RKE version: v1.1.11
Kubernetes version: v1.18.10
Downloads: kubectl, rke (links below)

Download kubectl v1.18.10:
wget https://storage.googleapis.com/kubernetes-release/release/v1.18.10/bin/linux/amd64/kubectl

Download RKE v1.1.11
RKE release page: https://github.com/rancher/rke/releases/tag/v1.1.11
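If the host has direct access to GitHub, the binary can be fetched with wget just like kubectl above; the asset name rke_linux-amd64 is the one the install step below expects (a minimal sketch, adjust if you go through a proxy or internal mirror):

#download the rke v1.1.11 release binary (assumes direct access to github.com)
wget https://github.com/rancher/rke/releases/download/v1.1.11/rke_linux-amd64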
I. Install Docker
#Install Docker dependencies and add the yum repo
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum makecache

#List all Docker versions available in the repo, then install the pinned version
yum list docker-ce --showduplicates | sort -r
yum -y install docker-ce-18.09.5-3.el7
systemctl start docker
systemctl enable docker

#Create a dedicated docker user (required for the RKE deployment); adjust the username and password for your environment
adduser docker -g docker
echo "rke@docker" | passwd --stdin docker
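RKE connects to each node over SSH as this user and runs docker commands, so it is worth confirming the new account can reach the Docker daemon. A quick sanity check (a minimal sketch):

#verify the docker user can talk to the Docker daemon
su - docker -c "docker ps"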

#Tune the Docker daemon configuration
cat >/etc/docker/daemon.json<<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "max-concurrent-downloads": 15,
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "registry-mirrors": ["https://t3xbrfwz.mirror.aliyuncs.com"],
  "insecure-registries":[
   "192.168.121.33"
  ],
  "live-restore": true
}
EOF
systemctl daemon-reload
systemctl restart docker
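To confirm the new daemon.json took effect, docker info should now report the systemd cgroup driver and the registry mirror configured above (a quick optional check):

#check that the cgroup driver and registry settings were picked up
docker info | grep -iE "cgroup driver|registry"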
II. Download RKE and Deploy Kubernetes
1. Install the rke and kubectl binaries

#Move the downloaded binaries into /usr/bin and make them executable
mv rke_linux-amd64 /usr/bin/rke
chmod a+x /usr/bin/rke
mv kubectl /usr/bin/kubectl
chmod a+x /usr/bin/kubectl
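Before moving on, it is worth confirming that both binaries are on the PATH and report the expected versions (a minimal check):

#confirm the binaries work and match the planned versions
rke --version
kubectl version --client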

#List the Kubernetes versions supported by this RKE release
#rke config --list-version --all prints the exact version strings
rke config --system-images --all |grep hyperkube
rancher/hyperkube:v1.17.13-rancher1
rancher/hyperkube:v1.15.12-rancher2
rancher/hyperkube:v1.16.15-rancher1
rancher/hyperkube:v1.18.10-rancher1

2. Set up passwordless SSH from the current user to the docker user on each node

ssh-keygen -t rsa -P "" -f ~/.ssh/rke_id_rsa
ssh-copy-id -i /root/.ssh/rke_id_rsa.pub docker@192.168.121.11
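Since RKE drives the whole installation over this SSH connection, a quick test that the key works and that the remote docker user can run docker saves debugging later (a minimal sketch using the node IP from the table above):

#verify passwordless SSH and remote docker access in one step
ssh -i ~/.ssh/rke_id_rsa docker@192.168.121.11 docker version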

3. Create the RKE cluster.yml file

mkdir /root/rke-init
cd /root/rke-init
Generate it with rke config, or create the file manually and paste in the configuration below:
#rke config --empty --name cluster.yml
touch /root/rke-init/cluster.yml
cat /root/rke-init/cluster.yml

# An example of an all-in-one Kubernetes cluster
nodes:
- address: 192.168.121.11
  port: "22"
  internal_address: 192.168.121.11
  role:
  - controlplane
  - etcd
  - worker
  hostname_override: "master-1"
  user: docker
  ssh_key_path: "~/.ssh/rke_id_rsa"
services:
  etcd:
    snapshot: true
    creation: 5m0s
    retention: 24h
    extra_args:
      quota-backend-bytes: '4294967296'
      max-request-bytes: '33554432'
  kube-api:
    service_cluster_ip_range: 10.43.0.0/16
    service_node_port_range: 30000-32767
    pod_security_policy: false
    always_pull_images: false
  kube-controller:
    cluster_cidr: 10.42.0.0/16
    service_cluster_ip_range: 10.43.0.0/16
    extra_args:
      node-cidr-mask-size: '24'
      pod-eviction-timeout: '1m'
  kubeproxy:
    extra_args:
      proxy-mode: "ipvs"
  kubelet:
    cluster_domain: cluster.local
    cluster_dns_server: 10.43.0.10
    fail_swap_on: false
    extra_args:
      pod-manifest-path: "/etc/kubernetes/manifest/"
      root-dir:  "/var/lib/kubelet"
      docker-root: "/var/lib/docker"
      max-pods: '250'
      kube-api-burst: '100'
      kube-api-qps: '100'
      max-open-files: '2000000'
      enforce-node-allocatable: 'pods'
      system-reserved: 'cpu=0.5,memory=500Mi'
      kube-reserved: 'cpu=0.5,memory=1000Mi'
      eviction-hard: 'memory.available<500Mi,nodefs.available<10%,imagefs.available<15%,nodefs.inodesFree<5%'
network:
  plugin: calico
authentication:
  strategy: x509
ssh_key_path: "~/.ssh/rke_id_rsa"
ssh_agent_auth: false
authorization:
  mode: rbac
ignore_docker_version: false
kubernetes_version: "v1.18.10-rancher1-2"
private_registries:
- url: 192.168.121.33
  user: ""
  password: ""
  is_default: false
cluster_name: "kubernetes"
restore:
  restore: false
  snapshot_name: ""

Change the kubelet working directory (optional)

services:
  kubelet:
    extra_args:
      root-dir:  "/data/docker-root/kubelet"
      docker-root: "/data/docker-root/docker"
    extra_binds:
    - "/data/docker-root/kubelet:/data/docker-root/kubelet:shared,z"

Enable volume snapshots (optional)

services:
  kubelet:
    extra_args:
      feature-gates: "VolumeSnapshotDataSource=true"

4. Deploy Kubernetes

cd /root/rke-init/
rke up
mkdir -p ~/.kube/
cat kube_config_cluster.yml >~/.kube/config
kubectl version
kubectl get cs
kubectl get node
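Because cluster.yml enables recurring etcd snapshots (creation: 5m0s, retention: 24h), a manual snapshot can also be taken at any time with rke etcd snapshot-save; the snapshot name below is just an example:

#take a one-off etcd snapshot in addition to the recurring ones
rke etcd snapshot-save --config cluster.yml --name manual-snapshot-$(date +%Y%m%d)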

5. Configure kubectl bash completion

#Check whether bash-completion is installed; if not, install it with yum
rpm -qa|grep bash-completion
yum -y install bash-completion

source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc
III. Uninstall the Kubernetes Cluster

Remove the RKE cluster

cd /root/rke-init/
rke remove --force

Run the cleanup script (an optional extra ipvs/CNI cleanup follows after it)

df -h|grep kubelet |awk -F % '{print $2}'|xargs umount
rm -rf /var/lib/kubelet/*
rm -rf /etc/kubernetes/*
rm -rf  /var/lib/rancher/*
rm -rf /var/lib/etcd/*
rm -rf /var/lib/cni/*
iptables -F && iptables -t nat -F
docker volume ls|tail -n +2|awk '{print $2}'|xargs docker volume rm
docker ps -q -a|xargs docker rm -f
reboot
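Since kube-proxy ran in ipvs mode and Calico was the CNI, leftover ipvs rules and CNI files can also be cleared before the reboot step above; a sketch using standard tool and path names (ipvsadm comes from the ipvsadm package, and /etc/cni/net.d and /opt/cni/bin are the default CNI locations):

#optional extra cleanup: flush ipvs rules and remove leftover CNI config and binaries
ipvsadm --clear
rm -rf /etc/cni/net.d/* /opt/cni/bin/*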
IV. Export All Images and Push Them to a Harbor Registry

Export all images:

docker images|tail -n +2|awk '{print $1":"$2}'|xargs docker save -o rke-k8sv1.18.10.tar

Download the offline deployment bundle

docker run -itd --name=rke-k8s registry.baidubce.com/tools/download-rke-k8s:v1.18.10

docker cp rke-k8s:/opt/rke-k8sv1.18.10.tar .
docker cp rke-k8s:/opt/k8sv1.18.10-install-base-pkg.tar.gz .

docker load -i rke-k8sv1.18.10.tar
tar -xf k8sv1.18.10-install-base-pkg.tar.gz
mv helm kubectl rke /usr/bin/
mkdir rke-init
mv cluster.yml rke-init/
#Edit cluster.yml for your environment, then install the cluster
rke up

Push all images to the Harbor registry

docker login 192.168.121.33 --username=admin --password=Harbor12345

docker images|tail -n +2|awk '{print "docker tag " $1":"$2 " 192.168.121.33/"$1":"$2}'|bash
docker images|grep 192.168.121.33|awk '{print "docker push "$1":"$2}'|bash
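To confirm the retag-and-push loop worked (and that a Harbor project matching the image namespace, e.g. rancher, exists), one image can be pulled back through the Harbor address; the hyperkube tag below is taken from the version listing earlier and serves only as a spot check:

#spot-check the registry by pulling one image back through Harbor
docker pull 192.168.121.33/rancher/hyperkube:v1.18.10-rancher1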