10.10.10.10 rancher-server
10.10.10.11 k8s-node1
10.10.10.22 k8s-node2
10.10.10.33 k8s-node3
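
These mappings must exist on every node; a minimal sketch for appending them to /etc/hosts (same names and IPs as above):

cat >> /etc/hosts <<EOF
10.10.10.10 rancher-server
10.10.10.11 k8s-node1
10.10.10.22 k8s-node2
10.10.10.33 k8s-node3
EOF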


server.1=192.168.1.11:2888:3888
server.2=192.168.1.12:2888:3888
server.3=192.168.1.13:2888:3888
server.4=192.168.1.14:2888:3888
server.5=192.168.1.15:2888:3888

echo "2" > data/myid

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
export KUBECONFIG=/etc/kubernetes/admin.conf
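
A quick sanity check that the kubeconfig works (not in the original notes):

kubectl get nodes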


kubectl patch storageclass managed-nfs-storage -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'

wget https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml
   
wget https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml
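
Per the KubeSphere docs, the next step is to apply both manifests and let the installer run:

kubectl apply -f kubesphere-installer.yaml
kubectl apply -f cluster-configuration.yaml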

yum install -y cri-o podman

hostnamectl set-hostname <new-hostname>

sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/CentOS_7/devel:kubic:libcontainers:stable.repo
sudo yum -y install podman

cd ~/.ssh && rm -rf *
ssh-keygen

sudo yum clean all
yum makecache
sudo yum update -y
reboot

ssh-copy-id 10.10.10.11
ssh-copy-id 10.10.10.22
ssh-copy-id 10.10.10.33
ssh-copy-id 10.10.10.44
ssh-copy-id 10.10.10.55
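
The same thing as a loop, assuming every host accepts the same credentials (hosts taken from the list above):

for h in 10.10.10.11 10.10.10.22 10.10.10.33 10.10.10.44 10.10.10.55; do
  ssh-copy-id "$h"
done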
yum install -y cri-o podman


echo 30 > /proc/sys/kernel/watchdog_thresh
echo "kernel.watchdog_thresh=30" >> /etc/sysctl.conf 
sysctl -w kernel.watchdog_thresh=30
sysctl -q vm.swappiness
sysctl -p

kubectl get pod --all-namespaces

reboot

# Unmount any volumes still mounted under kubelet
df -h|grep kubelet |awk -F % '{print $2}'|xargs umount
# Remove all containers
sudo docker rm -f $(sudo docker ps -qa)

# Unmount everything under /var/lib/kubelet/ before deleting the directory
for m in $(sudo tac /proc/mounts | sudo awk '{print $2}'|sudo grep /var/lib/kubelet);do
sudo umount $m||true
done
sudo rm -rf /var/lib/kubelet/

# Unmount everything under /var/lib/rancher/ before deleting the directory
for m in $(sudo tac /proc/mounts | sudo awk '{print $2}'|sudo grep /var/lib/rancher);do
sudo umount $m||true
done
sudo rm -rf /var/lib/rancher/
# Delete the /run/kubernetes/ directory
sudo rm -rf /run/kubernetes/
# Remove all Docker volumes
sudo docker volume rm $(sudo docker volume ls -q)
# List all containers and volumes again to make sure nothing is left
sudo docker ps -a
sudo docker volume ls

rm /var/lib/cni/* -rf
iptables -F && iptables -t nat -F
ip link del flannel.1
docker ps -aq | xargs -r docker rm -f
docker volume ls -q | xargs -r docker volume rm
docker stop $(docker ps -q) && docker rm -f $(docker ps -aq)
rm -rf /docker_volume/rancher_home/rancher
rm -rf /docker_volume/rancher_home/auditlog
mkdir -p /docker_volume/rancher_home/rancher
mkdir -p /docker_volume/rancher_home/auditlog
docker rmi $(docker images -q)

# Remove etcd and any remaining Kubernetes state
sudo rm -rf /var/etcd
rm -rf /etc/etcd/*
rm -rf /etc/etcd.env
rm -rf /etc/ssl/etcd
rm -rf /var/lib/etcd
rm -rf /usr/local/bin/etcd
rm -rf /usr/local/bin/etcdctl
rm -rf /etc/kubernetes/*
sudo rm -rf /var/lib/rancher/
sudo rm -rf /run/kubernetes/
rm -rf /var/lib/cni/*
rm -rf /etc/cni/net.d
rm -rf ~/.kube/config
iptables -F && iptables -t nat -F
ip link del flannel.1


# kubeadm warning seen during init; expected when NodeLocal DNSCache (169.254.25.10) is in use:
The recommended value for "clusterDNS" in "KubeletConfiguration" is: [10.233.0.10]; the provided value is: [169.254.25.10]

sudo -E /bin/sh -c "iptables -F && iptables -X && iptables -F -t nat && iptables -X -t nat && ip link del kube-ipvs0 && ip link del nodelocaldns"


sudo -E /bin/sh -c "/usr/local/bin/kubeadm init --config=/etc/kubernetes/kubeadm-config.yaml"


docker run -d \
  --restart=unless-stopped \
  --name=kuboard-spray \
  -p 80:80/tcp \
  -v /var/run/docker.sock:/var/run/docker.sock \
  -v ~/kuboard-spray-data:/data \
  swr.cn-east-2.myhuaweicloud.com/kuboard/kuboard-spray:latest-amd64

yum remove -y cri-o podman
yum remove docker* -y
yum remove docker \
                  docker-client \
                  docker-client-latest \
                  docker-common \
                  docker-latest \
                  docker-latest-logrotate \
                  docker-logrotate \
                  docker-engine

sudo docker run -d --privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run  rancher/rancher-agent:v2.6.3 --server https://10.10.10.10 --token hf5flprcxqvg6qwmrmhs7ljrh4r92d6lqldwcktbnpt5pgw5xgjp52 --ca-checksum 0db85f240bf125450ea6e73dae25fc173dba24a1fd420cb9610ac8041310739e --etcd --controlplane --worker

ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:83:91:2a brd ff:ff:ff:ff:ff:ff
    inet 10.10.10.38/24 brd 10.10.10.255 scope global noprefixroute dynamic ens33
       valid_lft 1589sec preferred_lft 1589sec
    inet 10.10.10.22/24 brd 10.10.10.255 scope global secondary ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::3394:2662:3296:2a31/64 scope link noprefixroute 

cd /etc/kubernetes/pki && rm -rf *


yum install net-tools -y
systemctl stop firewalld
systemctl disable firewalld
sudo sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
setenforce 0
yum install -y vim wget git
yum update -y
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum list docker-ce --showduplicates | sort -r
yum install -y docker-ce
systemctl start docker


docker stop $(docker ps -q) && docker rm -f $(docker ps -aq)


Install Docker


#!/bin/bash

mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo

sed -i -e '/mirrors.cloud.aliyuncs.com/d' -e '/mirrors.aliyuncs.com/d' /etc/yum.repos.d/CentOS-Base.repo
yum clean all && yum makecache
yum install net-tools -y
systemctl stop firewalld
systemctl disable firewalld
sudo sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
setenforce 0
yum install -y vim wget git


sudo yum install -y yum-utils \
  device-mapper-persistent-data \
  lvm2
sudo yum-config-manager \
    --add-repo \
    http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sudo yum install docker-ce docker-ce-cli containerd.io -y
sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://u1f9ys09.mirror.aliyuncs.com"]
}
EOF
sudo systemctl daemon-reload
sudo systemctl enable docker
sudo systemctl restart docker
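
To confirm the registry mirror was picked up after the restart (a quick check, not in the original notes):

docker info | grep -A1 "Registry Mirrors"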

Deploy Rancher

sudo docker run -d --restart=unless-stopped -p 80:80 -p 443:443 --privileged rancher/rancher

docker pull registry.cn-shanghai.aliyuncs.com/kuboard-spray/kuboard-spray-resource:spray-v2.18.0a-8_k8s-v1.23.5_v1.11-amd64


sudo docker run -d --privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run  rancher/rancher-agent:v2.6.3 --server https://192.168.1.11 --token nllkf89dgglbhds7gg5bj7bl7492qzcfmt5xctfszg6dhk6dz95tpn --ca-checksum c1e1db82151efbeb5f297385be3206905c128e84451ee7cd591ebc1b7aaf3feb --etcd --controlplane --worker



/etc/init.d/network restart


docker logs rancher 2>&1 | grep "Bootstrap Password:"

9HJP9N6ywf2lNy72

kubectl get storageclass
kubectl patch storageclass <your-class-name> -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'

sudo docker run -d --privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run  rancher/rancher-agent:v2.6.3 --server https://10.10.10.10 --token xvhnvpqgxbpc2m9vzrvhp5qcs8vmbp4cfb4v4lfk9ck8l9zzpkvrxf --ca-checksum 1d78866dfa2624d40356b785b380d20f88a8332bc2d5a89714008d5fd3c80de2 --etcd --controlplane --worker


sealos init --passwd 'liebe' \
    --master 10.10.10.11  --master 10.10.10.22 \
    --node 10.10.10.33 \
    --pkg-url /root/kube1.22.0.tar.gz \
    --version v1.22.0
    
    
Kubernetes .0 releases are not recommended for production!!!

Just get the servers ready, then run the commands below on any one of them.

# Download and install sealos. It is a single Go binary: download it and copy it into a bin directory (also available from the releases page).
wget -c https://sealyun.oss-cn-beijing.aliyuncs.com/latest/sealos && \
    chmod +x sealos && mv sealos /usr/bin

# Download the offline resource package
wget -c https://sealyun.oss-cn-beijing.aliyuncs.com/05a3db657821277f5f3b92d834bbaf98-v1.22.0/kube1.22.0.tar.gz

# Install a Kubernetes cluster with three masters
sealos init --passwd '123456' \
    --master 192.168.0.2  --master 192.168.0.3  --master 192.168.0.4  \
    --node 192.168.0.5 \
    --pkg-url /root/kube1.22.0.tar.gz \
    --version v1.22.0
Parameter meanings:

Parameter   Meaning                                               Example
passwd      server SSH password                                   123456
master      k8s master node IP address                            192.168.0.2
node        k8s node IP address                                   192.168.0.3
pkg-url     offline package location (local path or remote URL)  /root/kube1.22.0.tar.gz
version     version that the resource package corresponds to      v1.22.0
Add masters:

🐳 → sealos join --master 192.168.0.6 --master 192.168.0.7
🐳 → sealos join --master 192.168.0.6-192.168.0.9  # or a range of consecutive IPs

Add nodes:

🐳 → sealos join --node 192.168.0.6 --node 192.168.0.7
🐳 → sealos join --node 192.168.0.6-192.168.0.9  # or a range of consecutive IPs

Remove specific master nodes:

🐳 → sealos clean --master 192.168.0.6 --master 192.168.0.7
🐳 → sealos clean --master 192.168.0.6-192.168.0.9  # or a range of consecutive IPs

Remove specific nodes:

🐳 → sealos clean --node 192.168.0.6 --node 192.168.0.7
🐳 → sealos clean --node 192.168.0.6-192.168.0.9  # or a range of consecutive IPs

Tear down the whole cluster:

🐳 → sealos clean --all
    
    
    
cat > kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
# Disable swap usage; the kernel may only swap when the system is about to OOM
vm.swappiness=0
# Do not check whether enough physical memory is available before committing
vm.overcommit_memory=1
# Do not panic on OOM; let the OOM killer handle it
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
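
On a stock CentOS 7 kernel the net.bridge.* keys only exist once the br_netfilter module is loaded, so load it before applying the file below (an extra step, assuming the module is not loaded yet):

modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf   # persist across reboots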

cp kubernetes.conf /etc/sysctl.d/kubernetes.conf
sysctl -p /etc/sysctl.d/kubernetes.conf

mkdir /var/log/journal # directory for persistent journal logs
mkdir /etc/systemd/journald.conf.d
cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
[Journal]
# Persist logs to disk
Storage=persistent
# Compress historical logs
Compress=yes
SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000
# Cap total disk usage at 10G
SystemMaxUse=10G
# Cap each journal file at 200M
SystemMaxFileSize=200M
# Keep logs for 2 weeks
MaxRetentionSec=2week
# Do not forward to syslog
ForwardToSyslog=no
EOF
systemctl restart systemd-journald
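
To verify journald switched to persistent storage and see how much space the journal uses:

journalctl --disk-usage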

rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
# After installation, check that the new kernel's menuentry in /boot/grub2/grub.cfg
# contains an initrd16 entry; if it doesn't, install once more!
yum --enablerepo=elrepo-kernel install -y kernel-lt
# Boot from the new kernel by default
grub2-set-default 'CentOS Linux (4.4.189-1.el7.elrepo.x86_64) 7 (Core)'

Check the kernel version actually installed; the menuentry title may differ.
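
To list the exact menuentry titles in grub.cfg before picking one (a helper command, not in the original notes):

awk -F\' '/^menuentry / {print $2}' /boot/grub2/grub.cfg

Then set the default with the title that matches: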

grub2-set-default 'CentOS Linux (4.4.218-1.el7.elrepo.x86_64) 7 (Core)'

1. Check the current kernel version
$ uname -sr
Linux 3.10.0-693.2.2.el7.x86_64

2. Update the kernel


rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
yum --disablerepo="*" --enablerepo="elrepo-kernel" list available
yum --enablerepo=elrepo-kernel install kernel-ml -y

4. Set the default boot entry
Set GRUB_DEFAULT=0 in /etc/default/grub:

 vim /etc/default/grub
5. Regenerate the grub config file
 grub2-mkconfig -o /boot/grub2/grub.cfg
6. Reboot and check the kernel version
reboot
After the reboot:

$  uname -sr
Linux 5.4.6-1.el7.elrepo.x86_64


gluster peer probe 10.10.10.11
gluster peer probe 10.10.10.22
gluster peer probe 10.10.10.33
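
Verify that the peers joined (run on any node; the other two should show "Peer in Cluster"):

gluster peer status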

ssh-keygen -f /etc/heketi/heketi_key -t rsa -N ''
chown heketi:heketi  /etc/heketi/heketi*
ssh-copy-id -i /etc/heketi/heketi_key.pub root@10.10.10.11
ssh-copy-id -i /etc/heketi/heketi_key.pub root@10.10.10.22
ssh-copy-id -i /etc/heketi/heketi_key.pub root@10.10.10.33


cat <<EOF> /etc/heketi/heketi.json
{
  "port": "18080",
  "use_auth": false,
 
  "jwt": {
    "admin": {
      "key": "adminSecret"
    },
    "user": {
      "key": "userSecret"
    }
  },
 
  "glusterfs": {
    "executor": "ssh",
    "sshexec": {
      "keyfile": "/etc/heketi/heketi_key",
      "user": "root",
      "port": "22",
      "fstab": "/etc/fstab"
    },
    "brick_min_size_gb" : 1,
    "db": "/var/lib/heketi/heketi.db",
    "loglevel" : "debug"
  }
}
EOF
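
After writing the config, restart heketi and hit its health endpoint (assuming the systemd unit is named heketi):

systemctl restart heketi
curl http://10.10.10.11:18080/hello   # expect: Hello from Heketi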

cat <<EOF> /etc/heketi/topology_demo.json
{
  "clusters": [
    {
      "nodes": [
        {
          "node": {
            "hostnames": {
              "manage": ["10.10.10.11"],
              "storage": ["10.10.10.11"]
            },
            "zone": 1
          },
          "devices": ["/dev/sdb"]
        },
        {
          "node": {
            "hostnames": {
              "manage": ["10.10.10.22"],
              "storage": ["10.10.10.22"]
            },
            "zone": 1
          },
          "devices": ["/dev/sdb"]
        },
        {
          "node": {
            "hostnames": {
              "manage": ["10.10.10.33"],
              "storage": ["10.10.10.33"]
            },
            "zone": 1
          },
          "devices": ["/dev/sdb"]
        }
      ]
    }
  ]
}
EOF

export HEKETI_CLI_SERVER=http://10.10.10.11:18080
echo $HEKETI_CLI_SERVER
heketi-cli topology load --json=/etc/heketi/topology_demo.json
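
Confirm the topology loaded and the devices were registered:

heketi-cli topology info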


cat <<EOF> gluster-storage-class.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: glusterfs
provisioner: kubernetes.io/glusterfs
parameters:
  resturl: "http://10.10.10.11:18080"
  restuser: "admin"
  gidMin: "40000"
  gidMax: "50000"
  restauthenabled: "false"
  volumetype: "replicate:3"
EOF


kubectl apply -f  gluster-storage-class.yaml
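
A quick way to exercise the new class: create a small test PVC and check that it binds (name and size are illustrative, not from the original notes):

cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: glusterfs-test-pvc
spec:
  storageClassName: glusterfs
  accessModes: ["ReadWriteMany"]
  resources:
    requests:
      storage: 1Gi
EOF
kubectl get pvc glusterfs-test-pvc   # should reach Bound once heketi provisions the volume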


kubectl patch storageclass local -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'


hostnamectl set-hostname node1


./kk create config --with-kubernetes v1.21.5 --with-kubesphere v3.2.1


./kk create cluster -f config-sample.yaml

yum install conntrack-tools socat -y

kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
 
