Reference: http://blog.51cto.com/lizhenliang/2325770
Prerequisites:
  How load balancing works
  Basic Docker administration
  Cluster and distributed-system concepts
  How DNS resolution works
  Basic network protocols

Hosts:
master01  10.0.3.171
node1     10.0.3.104
node2     10.0.3.150

# Disable SELinux (the fine-grained mandatory access control on files)
vim /etc/selinux/config
...
SELINUX=disabled
...
---------or--------
setenforce 0

Disable the firewall

systemctl stop firewalld
systemctl disable firewalld

All of the steps above must be performed on every machine.
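
A minimal sketch for applying both settings to all three hosts in one go, assuming root SSH access to the IPs listed above:

for host in 10.0.3.171 10.0.3.104 10.0.3.150; do
  ssh root@$host "setenforce 0; \
    sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config; \
    systemctl stop firewalld; systemctl disable firewalld"
done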
Procedure

1. Deploy the etcd cluster

mkdir k8s
ls
cd k8s
# Download the cfssl tools
vi cfssl.sh
-----------------
curl -L https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -o /usr/local/bin/cfssl
curl -L https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -o /usr/local/bin/cfssljson
curl -L https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -o /usr/local/bin/cfssl-certinfo
chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson /usr/local/bin/cfssl-certinfo
-----------------------
bash cfssl.sh
ls /usr/local/bin/

cfssl    # run with no arguments to confirm the tools are installed
Generate the certificates

# Create a directory etcd-cert to hold the etcd certificates
mkdir etcd-cert
cd etcd-cert
# expiry 87600h is roughly ten years (note: JSON does not allow inline comments)
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "www": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF

cat > ca-csr.json <<EOF
{
    "CN": "etcd CA",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing"
        }
    ]
}
EOF

cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

# hosts must list the IP addresses of your own etcd nodes
cat > server-csr.json <<EOF
{
    "CN": "etcd",
    "hosts": [
    "10.0.3.171",
    "10.0.3.104",
    "10.0.3.150"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing"
        }
    ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server

ls *pem

This produces four files: ca-key.pem, ca.pem, server-key.pem, server.pem
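
As an optional sanity check, confirm that the three node IPs ended up in the server certificate's Subject Alternative Names, using the cfssl-certinfo tool installed earlier (or openssl):

cfssl-certinfo -cert server.pem
openssl x509 -in server.pem -noout -text | grep -A1 'Subject Alternative Name'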
Deploy etcd

mkdir /opt/etcd/{bin,cfg,ssl} -p
cp ca*pem server*pem /opt/etcd/ssl
# In the k8s directory, download the etcd binary release
wget https://github.com/etcd-io/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz
# Alternatively, download it locally and copy it over with scp
scp etcd-v3.3.10-linux-amd64.tar.gz root@10.0.3.171:/root/k8s/

Install the etcd binaries

tar zxvf etcd-v3.3.10-linux-amd64.tar.gz
mv etcd-v3.3.10-linux-amd64/{etcd,etcdctl} /opt/etcd/bin/

Create the etcd configuration file and systemd unit with an all-in-one script, etcd.sh:

#vi etcd.sh
#!/bin/bash
# example: ./etcd.sh etcd01 10.0.3.171 etcd02=https://10.0.3.104:2380,etcd03=https://10.0.3.150:2380

ETCD_NAME=$1
ETCD_IP=$2
ETCD_CLUSTER=$3

WORK_DIR=/opt/etcd

cat <<EOF >$WORK_DIR/cfg/etcd
#---------- etcd configuration file ----------
#[Member]
# ETCD_NAME: node name; ETCD_DATA_DIR: data directory
# ETCD_LISTEN_PEER_URLS: peer (cluster) listen address; ETCD_LISTEN_CLIENT_URLS: client listen address
ETCD_NAME="${ETCD_NAME}"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://${ETCD_IP}:2380"
ETCD_LISTEN_CLIENT_URLS="https://${ETCD_IP}:2379"

#[Clustering]
# ETCD_INITIAL_CLUSTER: addresses of all cluster members
# ETCD_INITIAL_CLUSTER_TOKEN: cluster token
# ETCD_INITIAL_CLUSTER_STATE: "new" for a new cluster, "existing" when joining an existing one
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://${ETCD_IP}:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://${ETCD_IP}:2379"
ETCD_INITIAL_CLUSTER="${ETCD_NAME}=https://${ETCD_IP}:2380,${ETCD_CLUSTER}"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

cat <<EOF >/usr/lib/systemd/system/etcd.service
#---------- systemd unit for etcd ----------
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=${WORK_DIR}/cfg/etcd
ExecStart=${WORK_DIR}/bin/etcd \
--name=\${ETCD_NAME} \
--data-dir=\${ETCD_DATA_DIR} \
--listen-peer-urls=\${ETCD_LISTEN_PEER_URLS} \
--listen-client-urls=\${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
--advertise-client-urls=\${ETCD_ADVERTISE_CLIENT_URLS} \
--initial-advertise-peer-urls=\${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
--initial-cluster=\${ETCD_INITIAL_CLUSTER} \
--initial-cluster-token=\${ETCD_INITIAL_CLUSTER_TOKEN} \
--initial-cluster-state=new \
--cert-file=${WORK_DIR}/ssl/server.pem \
--key-file=${WORK_DIR}/ssl/server-key.pem \
--peer-cert-file=${WORK_DIR}/ssl/server.pem \
--peer-key-file=${WORK_DIR}/ssl/server-key.pem \
--trusted-ca-file=${WORK_DIR}/ssl/ca.pem \
--peer-trusted-ca-file=${WORK_DIR}/ssl/ca.pem
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable etcd
systemctl restart etcd

Make the script executable:

chmod +x etcd.sh 

Create the required directories on each node:

mkdir k8s
mkdir /opt/etcd/{bin,cfg,ssl} -p

Copy the script, binaries, and certificates to the nodes:

scp etcd.sh root@10.0.3.104:/root/k8s/
scp -r /opt/etcd/bin/* root@10.0.3.104:/opt/etcd/bin/
scp -r /opt/etcd/ssl root@10.0.3.104:/opt/etcd
cd k8s
# Start etcd on the nodes first (repeat the scp commands above for 10.0.3.150); each member may hang until a quorum of peers is up, so run all three within a short window
./etcd.sh etcd02 10.0.3.104 etcd01=https://10.0.3.171:2380,etcd03=https://10.0.3.150:2380
./etcd.sh etcd03 10.0.3.150 etcd01=https://10.0.3.171:2380,etcd02=https://10.0.3.104:2380
./etcd.sh etcd01 10.0.3.171 etcd02=https://10.0.3.104:2380,etcd03=https://10.0.3.150:2380

After deployment, check the etcd cluster health:

 /opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://10.0.3.171:2379,https://10.0.3.104:2379,https://10.0.3.150:2379" cluster-health


Set the etcdctl API version environment variable

export ETCDCTL_API=3
systemctl restart etcd
/opt/etcd/bin/etcdctl member list
## You can also add export ETCDCTL_API=3 to /etc/profile to make it permanent
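
Note that the v3 API uses different TLS flag names from the v2 flags used above; a sketch of the equivalent health check against the HTTPS endpoints:

ETCDCTL_API=3 /opt/etcd/bin/etcdctl \
--cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem \
--endpoints="https://10.0.3.171:2379,https://10.0.3.104:2379,https://10.0.3.150:2379" \
endpoint health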


If anything goes wrong, check the logs first: /var/log/messages or journalctl -u etcd

2. Install Docker on the nodes

# Install Docker on both node01 and node02
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum install docker-ce -y
# Configure a registry mirror to speed up image pulls
curl -sSL https://get.daocloud.io/daotools/set_mirror.sh | sh -s http://f1361db2.m.daocloud.io
systemctl start docker
systemctl enable docker
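
As an alternative to piping the set_mirror.sh script into sh, the mirror can be configured directly in /etc/docker/daemon.json (a sketch; the mirror URL is the same example used above):

cat <<EOF >/etc/docker/daemon.json
{
  "registry-mirrors": ["http://f1361db2.m.daocloud.io"]
}
EOF
systemctl restart docker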

#############
docker run -it nginx    # quick test that a container can run (Ctrl-C to stop it)
docker version
docker info    # show detailed Docker information


3. Configure flannel

# On master01, write the predefined pod subnet into etcd
# note: `set` is a v2-API subcommand; run it with ETCDCTL_API=2 (or unset ETCDCTL_API) if you exported 3 earlier
cd k8s/etcd-cert
/opt/etcd/bin/etcdctl \
--ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem  \
--endpoints="https://10.0.3.171:2379,https://10.0.3.104:2379,https://10.0.3.150:2379" \
set /coreos.com/network/config  '{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}'
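
To confirm the key was written, read it back with the same v2-style flags:

/opt/etcd/bin/etcdctl \
--ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem \
--endpoints="https://10.0.3.171:2379,https://10.0.3.104:2379,https://10.0.3.150:2379" \
get /coreos.com/network/config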

Copy the generated certificates to node01 and node02:

scp ca*pem server*pem root@10.0.3.104:/opt/etcd/ssl/    # repeat for root@10.0.3.150
Deploy flannel on the nodes:
mkdir -p /opt/kubernetes/{bin,cfg,ssl}
# In node01's /root directory
wget https://github.com/coreos/flannel/releases/download/v0.10.0/flannel-v0.10.0-linux-amd64.tar.gz
tar zxvf flannel-v0.10.0-linux-amd64.tar.gz
mv flanneld mk-docker-opts.sh /opt/kubernetes/bin

A deployment script generates the flanneld config file, the flanneld.service unit, and a docker.service unit. The script flannel.sh is:

# vi flannel.sh
#----------------- contents as follows ------------
#!/bin/bash

# Pass the etcd endpoints as the first argument, or hard-code them here as the default
ETCD_ENDPOINTS=${1:-"https://10.0.3.171:2379,https://10.0.3.104:2379,https://10.0.3.150:2379"}

# Configure flanneld
cat <<EOF >/opt/kubernetes/cfg/flanneld

FLANNEL_OPTIONS="--etcd-endpoints=${ETCD_ENDPOINTS} \
-etcd-cafile=/opt/etcd/ssl/ca.pem \
-etcd-certfile=/opt/etcd/ssl/server.pem \
-etcd-keyfile=/opt/etcd/ssl/server-key.pem"

EOF

# systemd unit for flanneld
cat <<EOF >/usr/lib/systemd/system/flanneld.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network-online.target network.target
Before=docker.service

[Service]
Type=notify
EnvironmentFile=/opt/kubernetes/cfg/flanneld
ExecStart=/opt/kubernetes/bin/flanneld --ip-masq \$FLANNEL_OPTIONS
ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure

[Install]
WantedBy=multi-user.target

EOF

# Configure Docker to start inside the flannel-assigned subnet; do not skip this step
cat <<EOF >/usr/lib/systemd/system/docker.service

[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=/run/flannel/subnet.env
ExecStart=/usr/bin/dockerd \$DOCKER_NETWORK_OPTIONS
ExecReload=/bin/kill -s HUP \$MAINPID
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
Delegate=yes
KillMode=process
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s

[Install]
WantedBy=multi-user.target

EOF

# Restart flanneld and Docker
systemctl daemon-reload
systemctl enable flanneld
systemctl restart flanneld
systemctl restart docker
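
Run the script on node01 (with the endpoints hard-coded as the default above, no argument is needed; otherwise pass them as the first argument):

bash flannel.sh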

Check that it took effect:

ps -ef |grep docker
ip addr

Check whether docker0 has picked up the flannel-assigned subnet.
Also make sure docker0 and flannel.1 are in the same subnet; a quick way to compare them on the current node is sketched below.
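
A minimal check (subnet.env is the file generated by mk-docker-opts.sh above):

cat /run/flannel/subnet.env    # the subnet flannel assigned to this node
ip -4 addr show flannel.1
ip -4 addr show docker0        # should sit inside the same subnet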

To test cross-node connectivity, look up the docker0 IP of the other node and ping it from the current node:


ping 172.17.36.1

If the ping succeeds, flannel is deployed successfully; if not, check the logs with journalctl -u flanneld.
Copy the configuration to the other node, node02:

scp -r /opt/kubernetes/ root@10.0.3.150:/opt/
-----------
# note: if 10.0.3.150 does not have these directories yet, create them
mkdir -p /opt/kubernetes/{bin,cfg,ssl}
# cfg: config and token files; bin: the component binaries; ssl: certificates
-----------
# copy the certificates
scp -r /opt/etcd/ssl root@10.0.3.150:/opt/etcd
scp /usr/lib/systemd/system/flanneld.service root@10.0.3.150:/usr/lib/systemd/system
# start flanneld on node02
systemctl start flanneld
systemctl enable flanneld
ps -ef |grep flanneld
# If a running flanneld process shows up, flanneld started successfully.

Modify the Docker systemd unit on node02 as well:

vi /usr/lib/systemd/system/docker.service
# change the [Service] startup lines to:
EnvironmentFile=/run/flannel/subnet.env
ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS
ExecReload=/bin/kill -s HUP $MAINPID
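
After editing the unit, reload systemd and restart Docker so dockerd picks up the flannel-assigned bridge options:

systemctl daemon-reload
systemctl restart docker
ps -ef | grep docker    # the --bip option generated from subnet.env should now appear on the dockerd command line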

Docker Compose installation: https://docs.docker.com/compose/install/
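
A sketch of the binary install described at that link (the version number is only an example; substitute whichever release you need):

curl -L "https://github.com/docker/compose/releases/download/1.24.1/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
docker-compose --version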



