I. Planning

1. Cluster configuration

IP              hostname    OS          Requirements       Main software
192.168.10.10   master      CentOS7.9   2 cores/4G/100G    kubeadm/kubelet/kubectl/containerd
192.168.10.20   node01      CentOS7.9   2 cores/4G/100G    kubeadm/kubelet/kubectl/containerd
192.168.10.30   node02      CentOS7.9   2 cores/4G/100G    kubeadm/kubelet/kubectl/containerd
192.168.10.40   jenkins     CentOS7.9   2 cores/4G/100G    jenkins
192.168.10.50   gitlab      CentOS7.9   2 cores/4G/100G    gitlab
192.168.10.60   harbor      CentOS7.9   2 cores/4G/100G    harbor

II. Installation (the configuration in this part must be performed on all nodes)

1. Initialize the environment

#Set the hostname on each node in turn
hostnamectl set-hostname master && bash
hostnamectl set-hostname node01 && bash
hostnamectl set-hostname node02 && bash 

#hosts resolution (same file on every node)
cat > /etc/hosts << EOF
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.10.10 master
192.168.10.20 node01
192.168.10.30 node02
EOF
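
#Optional quick check that the names resolve on every node
for i in master node01 node02; do ping -c 1 $i; done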



#Disable SELinux, the firewall, and swap
setenforce 0    # take effect immediately; the sed below makes it permanent
sed -i 's/^ *SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config

systemctl stop firewalld.service ;  systemctl disable firewalld

sed -ri 's/.*swap.*/#&/' /etc/fstab 
swapoff -a
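
#Confirm swap is now off (the Swap line should read all zeros)
free -m | grep -i swap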


#Add online yum repos (the first is the Docker CE repo, the second the Aliyun CentOS 7 base repo)
curl -o /etc/yum.repos.d/docker-ce.repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo

#Kubernetes online repo
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
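
#Refresh the yum cache so the new repos take effect
yum clean all && yum makecache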



#Time synchronization
timedatectl set-timezone Asia/Shanghai
yum install ntpdate -y
ntpdate ntp1.aliyun.com
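
#Optional: keep clocks in sync going forward; the half-hourly schedule is just an example
echo '*/30 * * * * /usr/sbin/ntpdate ntp1.aliyun.com >/dev/null 2>&1' >> /var/spool/cron/root
crontab -l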

#Load required kernel modules
cat << EOF > /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF
modprobe overlay
modprobe br_netfilter
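
#Verify both modules are loaded
lsmod | grep -e overlay -e br_netfilter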

#Tune kernel parameters
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720

net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384

net.ipv6.conf.all.disable_ipv6 = 0
net.ipv6.conf.default.disable_ipv6 = 0
net.ipv6.conf.lo.disable_ipv6 = 0
net.ipv6.conf.all.forwarding = 1
EOF


#Reload the file to apply the system settings
sysctl -p /etc/sysctl.d/k8s.conf

sysctl --system


#These commands apply the kernel parameters in k8s.conf and make bridged traffic traverse the
#host's firewall rules. The three settings that matter most for Kubernetes are:
#net.bridge.bridge-nf-call-iptables = 1  : bridged IPv4 traffic passes through iptables chains.
#net.bridge.bridge-nf-call-ip6tables = 1 : bridged IPv6 traffic passes through ip6tables chains.
#net.ipv4.ip_forward = 1                 : enables IP forwarding.
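
#A quick check that the critical values took effect:
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward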


#Install ipvsadm and related tools
yum install ipvsadm ipset sysstat conntrack libseccomp -y
cat >> /etc/modules-load.d/ipvs.conf <<EOF 
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF
#Restart the service so the modules load
systemctl restart systemd-modules-load.service
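
#Verify the IPVS modules are loaded
lsmod | grep -e ip_vs -e nf_conntrack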


#IPVS (IP Virtual Server) is a load-balancing technology built into the Linux kernel and is
#the core of LVS (Linux Virtual Server). It distributes requests across a pool of backend
#("real") servers to improve performance and reliability, forwarding traffic via NAT,
#direct routing, or tunneling, and supports TCP, UDP, SCTP, and more. Backend state is
#tracked, so a failed server can be dropped from the pool and requests sent to healthy ones.

2. Install base packages (be patient, this takes a while)

yum install -y yum-utils device-mapper-persistent-data lvm2 \
wget net-tools nfs-utils lrzsz gcc gcc-c++ make cmake \
libxml2-devel openssl-devel  unzip sudo ntp \
libaio-devel wget  ncurses-devel autoconf automake zlib-devel \
python-devel epel-release openssh-server socat ipvsadm conntrack  telnet 

3. Install and configure containerd OR docker (pick just one)

containerd   ### If both runtimes end up installed, stop the containerd service first so the init step does not report errors
yum install -y containerd.io


#Create the containerd config directory
mkdir /etc/containerd -p 

#Generate the default config file
containerd config default > /etc/containerd/config.toml

#Edit the config file
vim /etc/containerd/config.toml  


# around line 61 (line numbers vary with the containerd version):
sandbox_image = "k8s.gcr.io/pause:3.6"
# change to
sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9"

# around line 125: change SystemdCgroup = false to SystemdCgroup = true
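
#The same two edits can be scripted; the patterns below assume the default config.toml layout, so verify the result afterwards
sed -i 's#sandbox_image = ".*"#sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9"#' /etc/containerd/config.toml
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
grep -e 'sandbox_image' -e 'SystemdCgroup' /etc/containerd/config.toml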



#Push the finished config to node01 and node02 with a for loop

for i in node01 node02; \
do scp -r /etc/containerd/config.toml $i:/etc/containerd/config.toml; done

#Enable and start the service
systemctl enable containerd; systemctl start containerd 
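
#Optional: point crictl (pulled in as a kubeadm dependency in step 4) at the containerd socket for debugging
cat > /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
EOF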
docker   ### the docker + cri-dockerd alternative
yum install container-selinux -y


yum install -y yum-utils device-mapper-persistent-data lvm2 
yum install -y docker-ce  containerd.io
mkdir /etc/docker
cat > /etc/docker/daemon.json <<EOF
{
  "registry-mirrors": ["https://reg-mirror.qiniu.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "500m", "max-file": "3"
  }
}
EOF


#Cgroups are managed by systemd here because, compared with cgroupfs, systemd's CPU/memory limiting is simpler and more mature and stable.
#Logs use the json-file driver, capped at 500 MB per file with 3 files kept (under /var/lib/docker/containers), which makes collection by ELK and similar log systems easier.

systemctl daemon-reload
systemctl restart docker.service
systemctl enable docker.service 

docker info | grep "Cgroup Driver"
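
#If the cri-dockerd rpm is not already in /opt, fetch it from the Mirantis releases page
#(the exact asset URL below is an assumption for v0.3.4; check https://github.com/Mirantis/cri-dockerd/releases)
wget -P /opt https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.4/cri-dockerd-0.3.4-3.el7.x86_64.rpm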
cd /opt
rpm -ivh cri-dockerd-0.3.4-3.el7.x86_64.rpm 

vim /lib/systemd/system/cri-docker.service
#Modify the ExecStart line as follows (keep the pause tag consistent with the containerd config above)
ExecStart=/usr/bin/cri-dockerd --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9

systemctl daemon-reload
systemctl enable --now cri-docker

for i in node01 node02; do scp /lib/systemd/system/cri-docker.service $i:/lib/systemd/system/cri-docker.service; done

systemctl daemon-reload
systemctl enable --now cri-docker

4. Install kubeadm, kubelet, and kubectl


#The latest version at the time of writing is 1.28.2; install it directly
#(pin versions, e.g. yum install kubelet-1.28.2 kubeadm-1.28.2 kubectl-1.28.2, if the repo has since moved on)
yum install -y kubectl kubelet kubeadm

#To keep the cgroup driver used by kubelet consistent with the container runtime's (systemd), write the following file.

cat << EOF > /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"
EOF

for i in node01 node02; \
do scp -r /etc/sysconfig/kubelet $i:/etc/sysconfig/kubelet; done




#Enable kubelet at boot. It cannot start yet because no config has been generated;
#it will start automatically once the cluster is initialized.
systemctl daemon-reload
systemctl enable kubelet

#List the images needed for initialization
kubeadm config images list --kubernetes-version 1.28.2
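
#Optional: pre-pull the images from the Aliyun mirror so the actual init is faster
kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.28.2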

# Generate the kubeadm config file used to bootstrap the cluster quickly
kubeadm config print init-defaults > /opt/kubeadm-config.yaml


# Edit /opt/kubeadm-config.yaml as follows. kubeadm 1.28 expects the v1beta3 API;
# the older v1beta2 schema from 1.20-era guides will be rejected.
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.10.10     ### the master's IP
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///run/containerd/containerd.sock   ### use unix:///var/run/cri-dockerd.sock if you chose docker + cri-dockerd
  name: master    ### the master's hostname
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/control-plane
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers   ### Aliyun mirror instead of the unreachable k8s.gcr.io
kind: ClusterConfiguration
kubernetesVersion: v1.28.2   ### match the installed kubeadm version
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
scheduler: {}

---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
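
#Optional: validate the config without touching the node before the real init
kubeadm init --config=/opt/kubeadm-config.yaml --dry-run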



III. Cluster initialization

1. Initialize the master

# List the images the stable release needs
kubeadm config images list --kubernetes-version=stable

# Initialize with the kubeadm init command. Run it on the master;
# if it errors out, see the k8s troubleshooting notes.

Method 1 (not recommended)
kubeadm init --kubernetes-version=stable --pod-network-cidr=10.218.0.0/16 --apiserver-advertise-address=192.168.10.10 --image-repository registry.aliyuncs.com/google_containers
#(if you chose docker + cri-dockerd, also pass --cri-socket unix:///var/run/cri-dockerd.sock)

--apiserver-advertise-address  the advertised cluster address, i.e. the master's IP
--image-repository             the default k8s.gcr.io is unreachable from China, so use the Aliyun mirror
--kubernetes-version           the K8s version, matching what was installed above
--service-cidr                 the virtual IP range used by Services inside the cluster
--pod-network-cidr             the Pod network; must match the CNI manifest deployed below

Method 2 (beginner-friendly)
# Initialize the master
kubeadm init --config=/opt/kubeadm-config.yaml --upload-certs | tee kubeadm-init.log


A successful initialization ends like this:
    
    
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.10.10:6443 --token uit9ko.dioyadpnoxsejius \
        --discovery-token-ca-cert-hash sha256:8ca57c7517eecdc8ffd9cfafa2ed0204d5f7c20fd460def250a31764d38438fd
-----------------------------------

##After the init finishes, continue here

mkdir -p $HOME/.kube   # run on ALL nodes


# Copy the kubeconfig file (on the master)
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config


# Add the worker nodes
#Copy $HOME/.kube/config from the master into the matching file on each node,
#make sure kubeadm/kubelet/kubectl are installed there, then run the join command printed at the end of init:

scp -p .kube/config node01:~/.kube/
scp -p .kube/config node02:~/.kube/

#Run the following on both workers (use the token/hash from YOUR init output, not mine)
kubeadm join 192.168.10.10:6443 --token uit9ko.dioyadpnoxsejius \
        --discovery-token-ca-cert-hash sha256:8ca57c7517eecdc8ffd9cfafa2ed0204d5f7c20fd460def250a31764d38438fd


Expected output:
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

# List the cluster nodes:
kubectl get node

#The join command can be regenerated at any time with: kubeadm token create --print-join-command

2. Install a network plugin (Calico or Flannel). Run on the master; the rollout takes several minutes.


Method 1: Calico (if the Calico manifests are unreachable at the moment, fall back to Flannel in method 2)
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.0/manifests/tigera-operator.yaml

wget https://raw.githubusercontent.com/projectcalico/calico/v3.26.0/manifests/custom-resources.yaml

vi custom-resources.yaml   # line 13 (13gg in vi)

cidr: 192.168.0.0/16   # change the range to 10.218.0.0/16 so it matches the pod CIDR used at init


kubectl create -f custom-resources.yaml
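
#Watch the Calico pods come up before moving on (Ctrl-C to exit)
watch kubectl get pods -n calico-system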

Method 2: Flannel
wget https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml

The downloaded manifest may need editing; the upstream README explains:

Deploying flannel manually
Flannel can be added to any existing Kubernetes cluster though it's simplest to add flannel before any pods using the pod network have been started.

For Kubernetes v1.17+

Deploying Flannel with kubectl
kubectl apply -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml

If you use custom podCIDR (not 10.244.0.0/16) you first need to download the above manifest and modify the network to match your one.
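
#For example, if the cluster was initialized with a pod CIDR of 10.218.0.0/16, patch the
#manifest before applying (a sketch; double-check the resulting yaml)
sed -i 's#10.244.0.0/16#10.218.0.0/16#' kube-flannel.yml
kubectl apply -f kube-flannel.yml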





3. Confirm the cluster is ready

kubectl get nodes
kubectl get pods -A -o wide

4. Install kubectl command auto-completion

yum install -y bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc
 


5. Install the dashboard

wget  https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml

# Modify the Service in recommended.yaml as follows
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort   # added
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
    

# Create the resources
kubectl apply -f recommended.yaml

#Check that the resources are ready
kubectl get all -n kubernetes-dashboard -o wide

# Access test
https://192.168.10.10:<NodePort>

# Create the admin account manifest
touch dashboard-admin.yaml


# dashboard-admin.yaml contents
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: dashboard-admin
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: dashboard-admin-cluster-role
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: dashboard-admin
    namespace: kubernetes-dashboard

#Create the RBAC resources
kubectl  apply -f dashboard-admin.yaml

#Inspect the service account
kubectl describe serviceaccount dashboard-admin -n kubernetes-dashboard

# Create a token for the account to log in to the dashboard
kubectl -n kubernetes-dashboard create token dashboard-admin
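
#The token above is short-lived by default; a longer TTL can be requested (kubectl 1.24+)
kubectl -n kubernetes-dashboard create token dashboard-admin --duration=24h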

#Get the dashboard NodePort

kubectl get svc -n kubernetes-dashboard
kubectl get all -n kubernetes-dashboard -o wide

Visit https://192.168.10.10:<NodePort>

 

IV. Install GitLab and Jenkins

1. Install GitLab

yum install -y curl policycoreutils openssh-server openssh-clients postfix policycoreutils-python

systemctl start postfix
systemctl enable postfix

wget https://mirrors.tuna.tsinghua.edu.cn/gitlab-ce/yum/el7/gitlab-ce-12.0.2-ce.0.el7.x86_64.rpm

rpm -ivh gitlab-ce-12.0.2-ce.0.el7.x86_64.rpm
sed -i "/^external_url/cexternal_url 'http://192.168.10.50'" /etc/gitlab/gitlab.rb

gitlab-ctl reconfigure
netstat -anpt | grep nginx
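
#Confirm all GitLab components are running
gitlab-ctl status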


vim /etc/gitlab/gitlab.rb                               # add the following around line 57
gitlab_rails['smtp_enable'] = true
gitlab_rails['smtp_address'] = 'smtp.qq.com'                    # SMTP server
gitlab_rails['smtp_port'] = 465                                 # SMTP server port
gitlab_rails['smtp_user_name'] = '2085077346@qq.com'            # mail account
gitlab_rails['smtp_password'] = ''                              # mail authorization code (left blank here)
gitlab_rails['smtp_authentication'] = 'login'
gitlab_rails['smtp_enable_starttls_auto'] = true
gitlab_rails['smtp_tls'] = true
gitlab_rails['gitlab_email_from'] = '2085077346@qq.com'


gitlab-ctl reconfigure

gitlab-rails console

irb(main):001:0> Notify.test_email('ChenZhuang1217@163.com','Test','Hello ZhangSan').deliver_now

ssh-keygen -t rsa            # generate a key pair (press Enter three times)
cat .ssh/id_rsa.pub          # view the public key and add it to your GitLab account

git clone git@192.168.10.50:ZhangSan/test.git

ls
anaconda-ks.cfg  gitlab-ce-12.0.2-ce.0.el7.x86_64.rpm  test
cd test/

ls
1.html


2. Jenkins

#Put the installation archives in this directory
mkdir /home/soft
mkdir /home/java




tar -zxvf /home/soft/jdk-8u171-linux-x64.tar.gz   -C  /home/java

#Edit the environment variables file
vim /etc/profile

#Configure the Java environment variables (JAVA_HOME must match the extraction path above)
export JAVA_HOME=/home/java/jdk1.8.0_171
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib:$CLASSPATH
export JAVA_PATH=${JAVA_HOME}/bin:${JRE_HOME}/bin
export PATH=$PATH:${JAVA_PATH}

#Apply the environment file
source /etc/profile

#Check the JDK version
java  -version
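
#The steps above only cover the JDK/Maven prerequisites. A minimal sketch of installing Jenkins
#itself from the official RPM repo follows; the repo/key URLs are the upstream defaults at the
#time of writing (verify at pkg.jenkins.io), and note that recent Jenkins LTS releases require
#Java 11 or 17, so the JDK 8 above may need to be newer.
wget -O /etc/yum.repos.d/jenkins.repo https://pkg.jenkins.io/redhat-stable/jenkins.repo
rpm --import https://pkg.jenkins.io/redhat-stable/jenkins.io-2023.key
yum install -y jenkins

systemctl enable --now jenkins                        # web UI listens on port 8080
cat /var/lib/jenkins/secrets/initialAdminPassword     # initial admin password for first login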



mkdir /home/maven


tar -zxvf /home/soft/apache-maven-3.6.3-bin.tar.gz   -C  /home/maven
#Edit the environment variables file
vim /etc/profile

MAVEN_HOME=/home/maven/apache-maven-3.6.3   # must match the extraction path above
export MAVEN_HOME
export PATH=${PATH}:${MAVEN_HOME}/bin

#Apply the environment file
source /etc/profile


#Check the Maven version
mvn -v



References. Many thanks to the bloggers and the official communities:

GitHub - flannel-io/flannel: flannel is a network fabric for containers, designed for Kubernetes

"Quick install of the latest Kubernetes (k8s) 1.27.2 with kubeadm" (Harr1y, CSDN)

"Deploying Kubernetes 1.27+ with kubeadm" (Zhihu)

https://www.cnblogs.com/zuoyang/p/16447958.html

Quickstart for Calico on Kubernetes | Calico Documentation

"Bootstrapping a cluster with kubeadm: Installing kubeadm" (Kubernetes v1.27 Chinese docs, BookStack)
