6. 复习: 重新安装集群

bilibili视频地址: 6.复习-重新安装k8s集群_哔哩哔哩 (゜-゜)つロ 干杯~-bilibili

1. 资源准备,所有节点

拉取k8s和flannel网络插件的镜像
这里提供百度云离线包,suveng-k8s-image.tar.gz

链接:https://pan.baidu.com/s/1lty5BLoz4eSBC7fKpSfj8A 提取码:eftw

下载离线包,上传到虚拟机内/root目录下

# Extract the offline image bundle and import every image tarball into Docker.
cd /root || exit 1
tar -zxvf suveng-k8s-image.tar.gz

# Load all images (kube-proxy, kube-apiserver, kube-controller-manager,
# kube-scheduler, coredns, etcd, pause, flannel) in one loop instead of
# one hard-coded `docker load` per file.
for image_tar in suveng/*.tar; do
  docker load -i "$image_tar"
done

# Re-tag each loaded image to the canonical k8s.gcr.io/<name>:<tag> name that
# kubeadm expects, then remove the temporary suveng/* alias so only the
# canonical tag remains. One loop replaces fourteen hand-written commands;
# the naming pattern is uniform: suveng/k8s.gcr.io-NAME:TAG -> k8s.gcr.io/NAME:TAG.
k8s_images="
kube-apiserver:v1.15.0
kube-scheduler:v1.15.0
kube-controller-manager:v1.15.0
kube-proxy:v1.15.0
etcd:3.3.10
pause:3.1
coredns:1.3.1
"
for image in $k8s_images; do
  docker tag "suveng/k8s.gcr.io-${image}" "k8s.gcr.io/${image}"
  docker rmi "suveng/k8s.gcr.io-${image}"
done

2. 环境配置,所有节点

# Configure the Aliyun mirror for the Kubernetes yum repository, then install
# the cluster tooling pinned to v1.15.0 to match the offline images.
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# -y added so the install is non-interactive (consistent with the other
# yum invocations in this guide; the original would block on a prompt).
yum install -y kubeadm-1.15.0 kubectl-1.15.0 kubelet-1.15.0

# Start kubelet now and enable it at boot.
# NOTE(review): kubelet will restart in a loop until `kubeadm init/join`
# writes its configuration — that is expected at this stage.
systemctl start kubelet
systemctl enable kubelet

# CentOS 7: install bridge utilities so bridged pod traffic can be managed.
yum install -y bridge-utils.x86_64

# Load the br_netfilter kernel module (verify with: lsmod | grep br_netfilter).
modprobe br_netfilter

# Make bridged IPv4/IPv6 traffic visible to iptables (required by kube-proxy).
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF

# Reload all sysctl configuration files.
sysctl --system

# Kubernetes requires swap to be disabled.
swapoff -a && sysctl -w vm.swappiness=0   # turn swap off for the running system
sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab   # comment out swap entries so it stays off after reboot

3. 安装k8s集群master

# Initialize the control-plane node. --pod-network-cidr must match flannel's
# default pod network (10.244.0.0/16); replace <master_ip> with this host's
# reachable IP address. The version matches the preloaded v1.15.0 images.
kubeadm init --apiserver-advertise-address <master_ip> --pod-network-cidr 10.244.0.0/16 --kubernetes-version 1.15.0

# After init completes, set up kubectl for the current user.
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

# Save the `kubeadm join` line printed by YOUR init output for the workers.
# The token/hash below are only an example — do not copy them verbatim.

kubeadm join 192.168.0.205:6443 --token fj6m98.tlsh8w89o27ojbqc \
    --discovery-token-ca-cert-hash sha256:e7ae2669a443be902feaf912c115662f3d238c807b41704a803308fdc6625a59


# Apply the flannel CNI manifest (its pod CIDR defaults to 10.244.0.0/16,
# which is why the same CIDR was passed to `kubeadm init` above).
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl apply -f kube-flannel.yml

# Check that the master node is registered.
kubectl get node

# Follow the kubelet logs for troubleshooting.

journalctl -fu kubelet

4. 安装k8s集群worker


# Run the `kubeadm join` command printed by YOUR master's init output.
# The token/hash below are only an example — do not copy them verbatim.
kubeadm join 192.168.0.205:6443 --token fj6m98.tlsh8w89o27ojbqc \
    --discovery-token-ca-cert-hash sha256:e7ae2669a443be902feaf912c115662f3d238c807b41704a803308fdc6625a59

5. worker节点配置kubectl

把master节点的 /etc/kubernetes/admin.conf 复制到worker节点(例如保存为 /root/config),再把shell的全局变量KUBECONFIG指向该文件

# Edit the shell profile so every login session exports KUBECONFIG.
vi /etc/profile

# Point kubectl at the kubeconfig copied from the master
# (assumes admin.conf was saved as /root/config on this worker).
KUBECONFIG='/root/config'
export KUBECONFIG

6. 推荐使用systemd

细心的人发现报了一个warning级别的日志:

[WARNING IsDockerSystemdCheck]: detected “cgroupfs” as the Docker cgroup driver. The recommended driver is “systemd”.

[警告IsDockerSystemdCheck]:检测到“cgroupfs”作为Docker cgroup驱动程序。 推荐的驱动程序是“systemd”

k8s cgroup-driver 可以考虑升级为 systemd

对于 docker, 新集群安装可以直接修改 /etc/docker/daemon.json 添加属性:

"exec-opts": [  "native.cgroupdriver=systemd" ]
Logo

K8S/Kubernetes社区为您提供最前沿的新闻资讯和知识内容

更多推荐