# Update every package, then reboot so a possibly-new kernel takes effect.
dnf update -y ; reboot

# Register the official Docker CE yum/dnf repository for CentOS.
dnf config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo

# Confirm the docker-ce package is now visible in the repo metadata.
dnf list docker-ce

# Install dependency packages.
# NOTE(review): containerd.io is pulled from the Fedora 30 repo because
# CentOS 8's repos lacked a compatible containerd.io at the time — verify
# this pinned 1.2.6 build is still appropriate before reuse.

yum install https://download.docker.com/linux/fedora/30/x86_64/stable/Packages/containerd.io-1.2.6-3.3.fc30.x86_64.rpm

yum install docker-ce -y

安装k8s

在node4上面操作:
    配置主机名互信:
    cat /etc/hosts
        192.168.1.124 node4
        192.168.1.125 node5
        192.168.1.126 node6
    将hosts文件拷贝到node5,node6
    scp   /etc/hosts  node5:/etc
    scp   /etc/hosts  node6:/etc

   可以将 DNS 配置为 Google 的公共 DNS(例如 8.8.8.8):

    安装k8s脚本:
    cat images.sh
    #!/bin/bash
    url=registry.cn-hangzhou.aliyuncs.com/google_containers
    version=v1.19.3
    kubeadm config images list --kubernetes-version=$version|awk -F '/' '{print $2}'>>images.txt
    for imagename in `cat images.txt` 
    do
      docker pull $url/$imagename
      docker tag $url/$imagename k8s.gcr.io/$imagename
      docker rmi -f $url/$imagename
    done

    先配置互信:
      ssh-keygen 
      ssh-copy-id -i /root/.ssh/id_rsa.pub node5
      ssh-copy-id -i /root/.ssh/id_rsa.pub node6

    安装docker等基础环境工具:
    yum install docker-ce kubelet kubeadm  kubectl -y
    ssh node5 "yum install docker-ce kubelet kubeadm  kubectl -y"
    ssh node6 "yum install docker-ce kubelet kubeadm  kubectl -y"

    将images.sh拷贝到node5,node6的/root/目录下
    scp images.sh node5:/root
    scp images.sh node6:/root

    重启docker,安装k8s组件:
    systemctl daemon-reload;systemctl restart docker;sh /root/images.sh
    ssh node5  "systemctl daemon-reload;systemctl restart docker;sh /root/images.sh"
    ssh node6  "systemctl daemon-reload;systemctl restart docker;sh /root/images.sh"

[root@node5 ~]# docker image ls
    REPOSITORY                           TAG                 IMAGE ID            CREATED             SIZE
    k8s.gcr.io/kube-proxy                v1.19.3             cdef7632a242        3 weeks ago         118MB
    k8s.gcr.io/kube-controller-manager   v1.19.3             9b60aca1d818        3 weeks ago         111MB
    k8s.gcr.io/kube-apiserver            v1.19.3             a301be0cd44b        3 weeks ago         119MB
    k8s.gcr.io/kube-scheduler            v1.19.3             aaefbfa906bd        3 weeks ago         45.7MB
    k8s.gcr.io/etcd                      3.4.13-0            0369cf4303ff        2 months ago        253MB
    k8s.gcr.io/coredns                   1.7.0               bfe3a36ebd25        4 months ago        45.2MB
    k8s.gcr.io/pause                     3.2                 80d28bedfe5d        8 months ago        683kB

    vi /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf  ,如果是ubuntu16,需要编辑这个文件:vi /etc/systemd/system/kubelet.service.d/10-kubeadm.conf:
        Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs"
        Environment="KUBELET_SYSTEM_PODS_ARGS=--pod-manifest-path=/etc/kubernetes/manifests --fail-swap-on=false"
        (注意: --allow-privileged 参数已在 Kubernetes 1.16 中从 kubelet 移除,v1.19.3 的 kubelet 带该参数会启动失败,不要再配置它。)

    swapoff -a
    vi /etc/fstab  将 /dev/mapper/centos-swap swap swap default 0 0 这一行前面加个 # 号将其注释掉。


    systemctl daemon-reload
    systemctl restart kubelet.service 


    kubeadm init --kubernetes-version=v1.19.3 --pod-network-cidr=10.244.0.0/16  --service-cidr=10.96.0.0/12  --ignore-preflight-errors=Swap


    centos8初始化完成:
        Your Kubernetes control-plane has initialized successfully!

        To start using your cluster, you need to run the following as a regular user:

          mkdir -p $HOME/.kube
          sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
          sudo chown $(id -u):$(id -g) $HOME/.kube/config

        You should now deploy a pod network to the cluster.
        Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
          https://kubernetes.io/docs/concepts/cluster-administration/addons/

        Then you can join any number of worker nodes by running the following on each as root:

        kubeadm join 192.168.2.212:6443 --token 3ounvx.jvam6xyu33n61lh1 \
            --discovery-token-ca-cert-hash sha256:493d5da793f54e5d20d34af1fbb10febf120ada0d42d84b2b696aa44d0fd455d

    执行:
        mkdir -p $HOME/.kube
        cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

    查看  kubectl  get cs,如果不健康提示类似:  
        scheduler            Unhealthy   Get "http://127.0.0.1:10251/healthz": dial tcp 127.0.0.1:10251: connect: connection refused   
        controller-manager   Unhealthy   Get "http://127.0.0.1:10252/healthz": dial tcp 127.0.0.1:10252: connect: connection refused 
    注释掉/etc/kubernetes/manifests下的kube-controller-manager.yaml和kube-scheduler.yaml中的 `- --port=0` 这一行。
    然后重启  systemctl restart kubelet.service
    获取节点信息:     kubectl get nodes
    获取所有系统级pods:  kubectl get pods -n kube-system
    获取所有名称空间: kubectl get ns

    安装flannel: 到flannel官方站点,如果kubernetes版本是1.17+直接执行:
        kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
    然后查看是否有安装上:kubectl get pods -n kube-system,会显示kube-flannel-ds-nphg8   1/1 
        或者docker image ls会看到flannel的插件

在node5上操作:
    设置docker开机自启: systemctl enable docker kubelet
    加入节点: sudo  kubeadm join 192.168.2.212:6443 --token 3ounvx.jvam6xyu33n61lh1 \
            --discovery-token-ca-cert-hash sha256:493d5da793f54e5d20d34af1fbb10febf120ada0d42d84b2b696aa44d0fd455d --ignore-preflight-errors=Swap
    然后在node4这台master上查看,
        如果加入成功,会看到两个flannel,两个kube-proxy:kubectl get pods -n kube-system -o wide
        可以查看nodes,应该是READY状态,如果是NOT READY,需要等到node5上所有组件都下载完才会准备好:  kubectl  get nodes
        


 

 

来源:https://www.linuxtechi.com/install-docker-ce-centos-8-rhel-8/

Logo

K8S/Kubernetes社区为您提供最前沿的新闻资讯和知识内容

更多推荐