Install Kubernetes v1.18.1 on 3 nodes

Environments

  • k8s-master01 192.168.75.10
  • k8s-node1 192.168.75.20
  • k8s-node2 192.168.75.30

change hostname and hosts

# Set the hostname on each node respectively (the original ran the
# k8s-master01 line on all three machines, which would give every node
# the same hostname and break the cluster):
hostnamectl set-hostname k8s-master01   # run on 192.168.75.10 only
hostnamectl set-hostname k8s-node1      # run on 192.168.75.20 only
hostnamectl set-hostname k8s-node2      # run on 192.168.75.30 only
# Add the cluster hostnames to /etc/hosts on EVERY node:
vi /etc/hosts  # add hostnames
192.168.75.10 k8s-master01
192.168.75.20 k8s-node1
192.168.75.30 k8s-node2

change yum.repo to 163

# Back up the stock repo file and switch to the 163 (NetEase) mirror.
# NOTE(review): assumes the current directory is /etc/yum.repos.d — confirm.
mv CentOS-Base.repo CentOS-Base.repo.bak
wget http://mirrors.163.com/.help/CentOS7-Base-163.repo
mv CentOS7-Base-163.repo CentOS-Base.repo
# Rebuild the yum metadata cache against the new mirror.
yum clean all
yum makecache

install packages

yum install -y conntrack ntpdate ntp ipvsadm ipset jq iptables curl sysstat libseccomp vim net-tools git

disable firewall

# Replace firewalld with plain iptables and flush all rules so cluster
# components can communicate freely between nodes.
systemctl stop firewalld && systemctl disable firewalld
yum -y install iptables-services && systemctl start iptables && systemctl enable iptables && iptables -F && service iptables save

disable swap and SELinux

# Turn off swap now and comment out swap entries in fstab (kubelet refuses
# to start with swap enabled by default).
swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
# Put SELinux in permissive mode now and disable it permanently.
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config

adjust kernel parameters

# Kernel parameters required by Kubernetes.
# NOTE: comments must be on their own lines — sysctl.d files do not support
# trailing comments after "key=value"; the original's inline comments would
# be read as part of the value and make "sysctl -p" fail on those keys.
cat > kubernetes.conf << EOF
# required: let iptables see bridged traffic
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
# only swap when the system is out of memory
vm.swappiness=0
# do not check whether physical memory is sufficient
vm.overcommit_memory=1
# do not panic on OOM
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
# required: disable IPv6
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
cp kubernetes.conf /etc/sysctl.d/kubernetes.conf
sysctl -p /etc/sysctl.d/kubernetes.conf

set timezone

# set timezone to Asia/Shanghai
timedatectl set-timezone Asia/Shanghai
# keep the hardware clock (RTC) in UTC rather than local time
timedatectl set-local-rtc 0
# restart time-dependent services so they pick up the new timezone
systemctl restart rsyslog
systemctl restart crond

stop unnecessary services

systemctl stop postfix && systemctl disable postfix

setup rsyslogd & systemd journald

# Configure systemd-journald: persist logs to disk, compress them, cap
# total size at 10G / 200M per file, keep 2 weeks, no forwarding to syslog.
mkdir /var/log/journal  # directory for persistent journal storage
mkdir /etc/systemd/journald.conf.d
cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
[Journal]
# 持久化保存到磁盘
Storage=persistent
# 压缩历史日志
Compress=yes
SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000
# 最大占用空间10G
SystemMaxUse=10G
# 单日志文件最大200M
SystemMaxFileSize=200M
# 日志保存时间2 周
MaxRetentionSec=2week
#  不将日志转发到  syslog
ForwardToSyslog=no 
EOF
systemctl  restart systemd-journald

upgrade Linux kernel to 4.4 (kernel-lt, e.g. 4.4.218)

(screenshot placeholder — image omitted)

# Install the elrepo repository and the long-term 4.4 kernel.
yum install -y rpm
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
yum --enablerepo=elrepo-kernel install -y kernel-lt

# boot from the new kernel by default
grub2-set-default "CentOS Linux (4.4.218-1.el7.elrepo.x86_64) 7 (Core)"
# reboot, then verify the running kernel version
# (the original had these as Chinese prose lines, which are not valid shell)
reboot
uname -a

(screenshot placeholder — image omitted)

Pre-requisites to Kube-proxy turn on IPVS

modprobe br_netfilter

# Kernel modules kube-proxy needs for IPVS mode; scripts under
# /etc/sysconfig/modules/ are re-run on every boot.
cat > /etc/sysconfig/modules/ipvs.modules <<'EOF'
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules
# The original placed "&&" at the start of its own line, which is a shell
# syntax error — the operator must continue the previous command.
bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4

(screenshot placeholder — image omitted)

install docker

yum install -y yum-utils device-mapper-persistent-data lvm2

yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

yum update -y && yum install -y docker-ce
# "yum update" may install a newer stock kernel and change the default
# boot entry, so re-select the elrepo kernel BEFORE rebooting (the
# original ran grub2-set-default after reboot — too late for that boot).
grub2-set-default "CentOS Linux (4.4.218-1.el7.elrepo.x86_64) 7 (Core)"
reboot

# start docker 
systemctl start docker
systemctl enable docker

# Configure the docker daemon: use the systemd cgroup driver (matches what
# kubelet expects) and cap json-file container logs at 100 MB.
cat > /etc/docker/daemon.json << EOF
{
 "exec-opts":["native.cgroupdriver=systemd"],
 "log-driver": "json-file",
 "log-opts":{
   "max-size": "100m"
 }
}
EOF
mkdir -p /etc/systemd/system/docker.service.d
# restart services so the new daemon config takes effect
systemctl daemon-reload && systemctl restart docker && systemctl enable docker

install kubeadm

add aliyun yum repo

# Aliyun mirror of the Kubernetes yum repo (gpg checks disabled because
# the mirror's signatures historically lagged behind).
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# list available versions — "yum list" takes space-separated package names;
# the original's commas would be treated as part of the names and fail
yum list kubeadm kubectl kubelet
yum install -y kubeadm-1.18.1 kubectl-1.18.1 kubelet-1.18.1
systemctl enable kubelet.service

install kube base images

kube base image download url https://download.csdn.net/download/joeyfu/12379340

kubeadm config images list
    W0411 16:44:06.032313    2168 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
    k8s.gcr.io/kube-apiserver:v1.18.1
    k8s.gcr.io/kube-controller-manager:v1.18.1
    k8s.gcr.io/kube-scheduler:v1.18.1
    k8s.gcr.io/kube-proxy:v1.18.1
    k8s.gcr.io/pause:3.2
    k8s.gcr.io/etcd:3.4.3-0
    k8s.gcr.io/coredns:1.6.7
# download the image archive from csdn:
#   https://download.csdn.net/download/joeyfu/12379340
# then unzip the images into /root/kube-base-images-1.8.1
# (the original left the URL and instruction as bare lines, not valid shell)
# load images on every node with a small script.
# NOTE: the heredoc delimiter is quoted ('EOF') so that $( cat ... ) and $i
# are written literally into load_images.sh. The original's unquoted EOF
# made the outer shell expand them while WRITING the file, producing a
# broken script.
cat > load_images.sh <<'EOF'
#!/bin/bash

ls /root/kube-base-images-1.8.1 > /tmp/image-list.txt

cd /root/kube-base-images-1.8.1

for i in $( cat /tmp/image-list.txt )
do
        docker load -i "$i"
done
EOF

chmod a+x load_images.sh
./load_images.sh

(screenshot placeholder — image omitted)

initialize master node

# Generate a default init config. Keep the filename consistent — the
# original wrote "kubadm-config.yaml" but ran init with "kubeadm-config.yaml".
kubeadm config print init-defaults > kubeadm-config.yaml
# Edit kubeadm-config.yaml:
#  - change advertiseAddress to the k8s-master01 IP (192.168.75.10)
#  - append a KubeProxyConfiguration document to enable IPVS mode.
# Note the "---" document separator, the space after each YAML colon, and
# the gate name "SupportIPVSProxyMode" (the original's "SupportIPVSProxyModel:true"
# is both misspelled and invalid YAML):
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs

kubeadm init --config=kubeadm-config.yaml --upload-certs | tee kubeadm-init.log

# if you encounter kubelet errors, reload systemd units and restart kubelet
systemctl daemon-reload && systemctl restart kubelet.service 
curl -sSL http://localhost:10248/healthz  # kubelet healthz; should return "ok"

# if init errors out or fails, wipe the partial state and retry
kubeadm reset
# on a successful init, kubeadm prints the following:

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.75.10:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:145f8a38f85a478629503f0779875bbc0bb6e077698ed884d8e09c24ed811ea0 


# run post installation commands (copy the admin kubeconfig so kubectl works)
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# now you will be able to run kubectl
kubectl get node
NAME           STATUS     ROLES    AGE    VERSION
k8s-master01   NotReady   master   7m2s   v1.18.1
# not ready because network plugin not installed

wget http://mama.indstate.edu/users/ice/tree/src/tree-1.7.0.tgz

Configure network - flannel

# fetch the flannel CNI manifest and apply it
wget http://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl create -f kube-flannel.yml
# if images can't pull, switch to a China mirror and re-apply
sed -i 's#quay.io#quay-mirror.qiniu.com#g' kube-flannel.yml
kubectl apply -f kube-flannel.yml

[root@k8s-master01 flannel]# kubectl get po -n kube-system
NAME                                   READY   STATUS    RESTARTS   AGE
coredns-66bff467f8-k5m4w               1/1     Running   0          26m
coredns-66bff467f8-l7wx5               1/1     Running   0          26m
etcd-k8s-master01                      1/1     Running   0          26m
kube-apiserver-k8s-master01            1/1     Running   0          26m
kube-controller-manager-k8s-master01   1/1     Running   0          26m
kube-flannel-ds-amd64-9pbwn            1/1     Running   0          4m57s
kube-proxy-knpkp                       1/1     Running   0          26m
kube-scheduler-k8s-master01            1/1     Running   0          26m
[root@k8s-master01 flannel]# kubectl get node
NAME           STATUS   ROLES    AGE   VERSION
k8s-master01   Ready    master   26m   v1.18.1
# node status changed to ready after flannel installed

Add nodes to cluster

# run on each worker node, using the token and CA-cert hash printed by
# "kubeadm init" (also saved in kubeadm-init.log on the master)
kubeadm join 192.168.75.10:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:145f8a38f85a478629503f0779875bbc0bb6e077698ed884d8e09c24ed811ea0 

install Harbor

Prerequisites

# install docker-compose (required by the Harbor installer)
yum install epel-release -y
yum install docker-compose -y
# install packages
yum install -y conntrack ntpdate ntp ipvsadm ipset jq iptables curl sysstat libseccomp vim net-tools git
# disable firewall, swap and SELinux (same hardening as the cluster nodes)
systemctl stop firewalld && systemctl disable firewalld
yum -y install iptables-services && systemctl start iptables && systemctl enable iptables && iptables -F && service iptables save
swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
# set timezone (keep the RTC in UTC)
timedatectl set-timezone Asia/Shanghai
timedatectl set-local-rtc 0
systemctl restart rsyslog
systemctl restart crond
# stop unnecessary services
systemctl stop postfix && systemctl disable postfix
# setup rsyslogd & systemd journald (persist, compress, cap at 10G total /
# 200M per file, 2-week retention, no forwarding to syslog)
mkdir /var/log/journal  # directory for persistent journal storage
mkdir /etc/systemd/journald.conf.d
cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
[Journal]
# 持久化保存到磁盘
Storage=persistent
# 压缩历史日志
Compress=yes
SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000
# 最大占用空间10G
SystemMaxUse=10G
# 单日志文件最大200M
SystemMaxFileSize=200M
# 日志保存时间2 周
MaxRetentionSec=2week
#  不将日志转发到  syslog
ForwardToSyslog=no 
EOF
systemctl  restart systemd-journald
# upgrade kernel to 4.4 (kernel-lt from elrepo)
yum install -y rpm
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
yum --enablerepo=elrepo-kernel install -y kernel-lt

# boot from the new kernel by default
grub2-set-default "CentOS Linux (4.4.218-1.el7.elrepo.x86_64) 7 (Core)"
# reboot, then verify the running kernel version
# (the original had these as Chinese prose lines, which are not valid shell)
reboot
uname -a
# install docker
yum install -y yum-utils device-mapper-persistent-data lvm2

yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

yum update -y && yum install -y docker-ce
# "yum update" may install a newer stock kernel and change the default
# boot entry, so re-select the elrepo kernel BEFORE rebooting (the
# original ran grub2-set-default after reboot — too late for that boot).
grub2-set-default "CentOS Linux (4.4.218-1.el7.elrepo.x86_64) 7 (Core)"
reboot

# start docker 
systemctl start docker
systemctl enable docker

# systemd cgroup driver + log rotation, plus the private Harbor registry
# (listed as insecure because it uses a self-signed certificate) and a
# China registry mirror
cat > /etc/docker/daemon.json << EOF
{
 "exec-opts":["native.cgroupdriver=systemd"],
 "log-driver": "json-file",
 "log-opts":{
   "max-size": "100m"
 },
 "insecure-registries": ["hub.k8s.com"],
 "registry-mirrors": ["https://hub.k8s.com", "https://reg-mirror.qiniu.com"]
}
EOF

mkdir -p /etc/systemd/system/docker.service.d
# restart services so the new daemon config takes effect
systemctl daemon-reload && systemctl restart docker && systemctl enable docker

# add insecure-registries in all nodes and restart docker service

Harbor setup & installation

# download Harbor offline package
https://pan.baidu.com/s/1IKhBILaocDJkUveG-rXgVQ
提取码: 7bts
tar zxvf harbor-offline-installer-v1.10.1.tgz
mv harbor /usr/local
cd /usr/local/harbor && ls
vi harbor.yml
# edit harbor.yml: set hostname, the https certificate paths and the admin password
mkdir -p /data/cert
cd /data/cert
# generate the private key (prompts for a passphrase)
openssl genrsa -des3 -out harbor.key 2048
# generate the certificate signing request (prompts for the key passphrase)
openssl req -new -key harbor.key -out harbor.csr
# non-interactive alternative:
# openssl req -new \ 
#-subj "/C=CN/ST=GD/L=SZ/O=k8s/OU=k8s/CN=hub.k8s.com/emailAddress=k8s@163.com" \
#-key harbor.key -out harbor.csr
# back up the key
cp harbor.key harbor.key.org
# strip the passphrase so docker/harbor can read the key unattended
# (for mutual TLS see the official Harbor docs)
openssl rsa -in harbor.key.org -out harbor.key
# self-sign the certificate with the key
openssl x509 -req -days 365 -in harbor.csr -signkey harbor.key -out harbor.crt
# Certificates only need to be readable — the original's "chmod -R 777"
# and "chmod a+x *" made the PRIVATE KEY world-writable and executable.
# Keep the cert world-readable and the key owner-only:
chmod 644 harbor.crt && chmod 600 harbor.key
# install
cd /usr/local/harbor/
./install.sh
# view hub.k8s.com in chrome
# stop harbor
docker-compose down -v
# start harbor
docker-compose up -d

(screenshot placeholder — image omitted)

Add nodes to harbor

# "docker login" expects a registry hostname; the https:// scheme is not
# part of a registry reference
docker login hub.k8s.com
# try uploading an image to harbor
docker pull wangyanglinux/myapp:v1
docker tag wangyanglinux/myapp:v1 hub.k8s.com/library/myapp:v1
docker push hub.k8s.com/library/myapp:v1

Harbor login page
(screenshot placeholder — image omitted)

Logo

(Site footer) The K8S/Kubernetes community provides the latest news and knowledge content

(Site footer) More recommendations