1. Setup Overview
1) For this local setup I use VMware to run CentOS virtual machines as simulated servers, then build the k8s cluster on top of them.
2. Environment Preparation
1) Configuration details

Hostname       IP            OS version        Installed components
kube-master0   192.168.1.10  CentOS 7.6.1810   etcd, kube-apiserver, kube-scheduler, kube-controller-manager, kubectl
kube-node1     192.168.1.12  CentOS 7.6.1810   docker, kubelet, kube-proxy

2) hosts file contents

[root@kube-master0 app]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.10 kube-master0
192.168.1.12 kube-node1

3. Download Links
GitHub: https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.8.md#v1815

wget https://dl.k8s.io/v1.8.15/kubernetes-server-linux-amd64.tar.gz
wget https://dl.k8s.io/v1.8.15/kubernetes-node-linux-amd64.tar.gz
wget https://github.com/etcd-io/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz

4. Disable SELinux and the Firewall (kube-master0, kube-node1)

setenforce 0
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
sestatus
systemctl disable firewalld.service && systemctl stop firewalld.service
firewall-cmd --state

5. Disable the Swap Partition (kube-master0, kube-node1)

swapoff -a
Alternatively, comment out the swap entry in /etc/fstab (takes effect after a reboot):
[root@kube-master0 ~]# vim /etc/fstab
#/dev/mapper/centos-swap swap                    swap    defaults        0 0
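
To confirm swap is off, the Swap line in free should read all zeros:

free -m | grep -i swap    # expect: Swap:  0  0  0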

6. Set the Node Hostnames (kube-master0, kube-node1)

hostnamectl set-hostname kube-master0    # run on 192.168.1.10
hostnamectl set-hostname kube-node1      # run on 192.168.1.12

7. Configure Kernel Routing Parameters and Enable IPv4 Forwarding (kube-master0, kube-node1)

cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF

sysctl --system
sysctl -a | grep "net.ipv4.ip_forward"
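
If forwarding is enabled, the last command should report:

net.ipv4.ip_forward = 1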

8. Install etcd
1) Upload the downloaded etcd-v3.3.10-linux-amd64.tar.gz to the server
2) Extract it and copy the binaries to /usr/bin

tar -zxvf etcd-v3.3.10-linux-amd64.tar.gz
cp -v etcd-v3.3.10-linux-amd64/{etcd,etcdctl} /usr/bin

3) Configure etcd
Create the directories

mkdir -p /var/lib/etcd /etc/etcd

Create the configuration file

cat > /etc/etcd/etcd.conf <<EOF
ETCD_NAME="default"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_CLIENT_URLS="http://192.168.1.10:2379,http://127.0.0.1:2379"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.1.10:2379"
EOF

4) Create the etcd systemd unit

cat > /usr/lib/systemd/system/etcd.service <<EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd
EnvironmentFile=-/etc/etcd/etcd.conf
ExecStart=/usr/bin/etcd
Restart=on-failure
# Restart=always
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

5) Start the etcd service

systemctl daemon-reload && systemctl enable etcd && systemctl restart etcd && systemctl status etcd

ss -antulp | grep etcd

6) Verify that etcd is running

etcdctl cluster-health

Output like the following means etcd started successfully:

member 8e9e05c52164694d is healthy: got healthy result from http://127.0.0.1:2379
cluster is healthy
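
As an optional sanity check (a minimal sketch; the key name /test is arbitrary), write and read back a key through the etcd v2 API that etcdctl uses by default in this release:

etcdctl set /test hello     # should print: hello
etcdctl get /test           # should print: hello
etcdctl rm /test            # remove the test key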

9. Deploy the Master Components
1) Upload the downloaded kubernetes-server-linux-amd64.tar.gz to the server
2) Extract it and copy the binaries to /usr/bin

tar -zxvf kubernetes-server-linux-amd64.tar.gz
cp -v kubernetes/server/bin/{kube-apiserver,kube-scheduler,kube-controller-manager,kubectl} /usr/bin

3) Create a directory for the configuration files

mkdir -p /etc/kubernetes

4) Configure the kube-apiserver service

cat > /etc/kubernetes/kube-apiserver <<EOF
# Log to stderr
KUBE_LOGTOSTDERR="--logtostderr=true"
# Log level
KUBE_LOG_LEVEL="--v=4"
# Etcd endpoints
KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.1.10:2379"
# API server listen address
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
# API server listen port
KUBE_API_PORT="--insecure-port=8080"
# Address advertised to cluster members
KUBE_ADVERTISE_ADDR="--advertise-address=192.168.1.10"
# Allow containers to request privileged mode (default: false)
KUBE_ALLOW_PRIV="--allow-privileged=false"
# Service cluster IP range
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=192.168.1.0/24"
# Admission control plugins
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ResourceQuota"
EOF

5) Create the kube-apiserver unit file

vim /usr/lib/systemd/system/kube-apiserver.service
# add the following:
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/etc/kubernetes/kube-apiserver
ExecStart=/usr/bin/kube-apiserver \
${KUBE_LOGTOSTDERR} \
${KUBE_LOG_LEVEL} \
${KUBE_ETCD_SERVERS} \
${KUBE_API_ADDRESS} \
${KUBE_API_PORT} \
${KUBE_ADVERTISE_ADDR} \
${KUBE_ALLOW_PRIV} \
${KUBE_SERVICE_ADDRESSES} \
${KUBE_ADMISSION_CONTROL}
Restart=on-failure
[Install]
WantedBy=multi-user.target

6) Start the kube-apiserver service (and enable it at boot)

systemctl daemon-reload && systemctl enable kube-apiserver && systemctl restart kube-apiserver && systemctl status kube-apiserver
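
A quick liveness check (assuming the insecure address and port configured above) is to hit the /healthz endpoint:

curl http://127.0.0.1:8080/healthz    # should print: ok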

7) Configure the kube-scheduler service

cat > /etc/kubernetes/kube-scheduler <<EOF
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=4"
KUBE_MASTER="--master=192.168.1.10:8080"
KUBE_LEADER_ELECT="--leader-elect"
EOF

8) Create the kube-scheduler unit file

vim /usr/lib/systemd/system/kube-scheduler.service
# add the following:
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/etc/kubernetes/kube-scheduler
ExecStart=/usr/bin/kube-scheduler \
${KUBE_LOGTOSTDERR} \
${KUBE_LOG_LEVEL} \
${KUBE_MASTER} \
${KUBE_LEADER_ELECT}
Restart=on-failure
[Install]
WantedBy=multi-user.target

9) Start the kube-scheduler service (and enable it at boot)

systemctl daemon-reload && systemctl enable kube-scheduler && systemctl restart kube-scheduler && systemctl status kube-scheduler
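
The scheduler also exposes a /healthz endpoint, on its default insecure port (10251 in this release):

curl http://127.0.0.1:10251/healthz   # should print: ok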

10) Configure kube-controller-manager

cat > /etc/kubernetes/kube-controller-manager <<EOF
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=4"
KUBE_MASTER="--master=192.168.1.10:8080"
KUBE_SERVICE_ACCOUNT_KEY="--service-account-private-key-file=/var/run/kubernetes/apiserver.key"
EOF

11) Create the kube-controller-manager unit file

vim /usr/lib/systemd/system/kube-controller-manager.service
# add the following:
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/etc/kubernetes/kube-controller-manager
ExecStart=/usr/bin/kube-controller-manager \
${KUBE_LOGTOSTDERR} \
${KUBE_LOG_LEVEL} \
${KUBE_MASTER} \
${KUBE_SERVICE_ACCOUNT_KEY}
Restart=on-failure
[Install]
WantedBy=multi-user.target

12) Start the kube-controller-manager service (and enable it at boot)

systemctl daemon-reload && systemctl enable kube-controller-manager && systemctl restart kube-controller-manager && systemctl status kube-controller-manager 
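
Like the scheduler, the controller manager serves /healthz on its default insecure port (10252):

curl http://127.0.0.1:10252/healthz   # should print: ok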

13) Check the master component processes and status

ps -ef|grep kube
root      18663      1  2 11:47 ?        00:00:22 /usr/bin/kube-apiserver --logtostderr=true --v=4 --etcd-servers=http://192.168.1.10:2379 --insecure-bind-address=0.0.0.0 --insecure-port=8080 --advertise-address=192.168.1.10 --allow-privileged=false --service-cluster-ip-range=192.168.1.0/24
root      19621      1  1 12:00 ?        00:00:01 /usr/bin/kube-scheduler --logtostderr=true --v=4 --master=192.168.1.10:8080 --leader-elect
root      19775      1  2 12:02 ?        00:00:01 /usr/bin/kube-controller-manager --logtostderr=true --v=4 --master=192.168.1.10:8080
root      19823  10652  0 12:03 pts/0    00:00:00 grep --color=auto kube

Component status:

kubectl get componentstatuses
NAME                 STATUS    MESSAGE             ERROR
etcd-0               Healthy   {"health":"true"}
controller-manager   Healthy   ok
scheduler            Healthy   ok

If a service fails to start, check its logs:

# replace kube-apiserver with the name of the failing service
journalctl -u kube-apiserver

14) Master service start order
Start etcd first, then kube-apiserver; the remaining components can be started in any order (see the restart sketch below).
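
A minimal convenience sketch (service names as installed above) that restarts the master components in dependency order:

#!/bin/bash
# Restart master components in dependency order; stop at the first failure.
for svc in etcd kube-apiserver kube-scheduler kube-controller-manager; do
    systemctl restart "$svc" || { echo "failed to restart $svc" >&2; exit 1; }
done
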
10. Install docker-ce (kube-node1)
1) Add the docker-ce repository

yum -y install yum-utils device-mapper-persistent-data lvm2 conntrack-tools bridge-utils ipvsadm
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum makecache fast

Check the available versions

yum list docker-ce --showduplicates |sort -r
yum list docker-ce-selinux --showduplicates |sort -r

2) Install docker-ce

yum -y install --setopt=obsoletes=0 docker-ce-17.03.3.ce-1.el7 docker-ce-selinux-17.03.3.ce-1.el7
systemctl enable docker && systemctl start docker && systemctl status docker

3) Configure a Docker registry mirror

tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://u6g7us5g.mirror.aliyuncs.com"]
}
EOF

Restart the docker service

systemctl daemon-reload && systemctl restart docker && systemctl status docker
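
To confirm the mirror is active, check docker info (output layout may vary by Docker version):

docker info | grep -A1 "Registry Mirrors"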

11. Deploy the k8s Node Components (kube-node1)
1) Upload kubernetes-node-linux-amd64.tar.gz to the node
2) Extract it and copy the binaries to /usr/bin

tar -zxvf kubernetes-node-linux-amd64.tar.gz
cp -v kubernetes/node/bin/{kubelet,kube-proxy} /usr/bin

3) Configure the kubelet service
Create a directory for the configuration files

mkdir -p /etc/kubernetes/

4) Create the kubeconfig file

cat > /etc/kubernetes/kubelet.kubeconfig <<EOF
apiVersion: v1
kind: Config
clusters:
  - cluster:
      server: http://192.168.1.10:8080
    name: local
contexts:
  - context:
      cluster: local
    name: local
current-context: local
EOF

5) Create the kubelet configuration file

cat > /etc/kubernetes/kubelet <<EOF
# Log to stderr
KUBE_LOGTOSTDERR="--logtostderr=true"
# Log level
KUBE_LOG_LEVEL="--v=4"
# Kubelet listen address
NODE_ADDRESS="--address=192.168.1.12"
# Kubelet listen port
NODE_PORT="--port=10250"
# Override the node name
NODE_HOSTNAME="--hostname-override=192.168.1.12"
# Path to the kubeconfig that points at the API server
KUBELET_KUBECONFIG="--kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
# Allow containers to request privileged mode (default: false)
KUBE_ALLOW_PRIV="--allow-privileged=false"
# DNS settings
KUBELET_DNS_IP="--cluster-dns=192.168.1.1"
KUBELET_DNS_DOMAIN="--cluster-domain=cluster.local"
# Do not fail when swap is enabled
KUBELET_SWAP="--fail-swap-on=false"
# Registry address for the pause-amd64 image
KUBELET_ARGS="--pod-infra-container-image=registry.cn-beijing.aliyuncs.com/zhoujun/pause-amd64:3.1"
EOF

6) Create the kubelet unit file
Create vim /usr/lib/systemd/system/kubelet.service

# add the following:
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service
[Service]
EnvironmentFile=-/etc/kubernetes/kubelet
ExecStart=/usr/bin/kubelet \
${KUBE_LOGTOSTDERR} \
${KUBE_LOG_LEVEL} \
${NODE_ADDRESS} \
${NODE_PORT} \
${NODE_HOSTNAME} \
${KUBELET_KUBECONFIG} \
${KUBE_ALLOW_PRIV} \
${KUBELET_DNS_IP} \
${KUBELET_DNS_DOMAIN} \
${KUBELET_SWAP} \
${KUBELET_ARGS}
Restart=on-failure
KillMode=process
[Install]
WantedBy=multi-user.target

7) Start the kubelet service (and enable it at boot)

systemctl daemon-reload && systemctl enable kubelet && systemctl restart kubelet && systemctl status kubelet
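
The kubelet serves a /healthz endpoint on its default health port (10248):

curl http://127.0.0.1:10248/healthz   # should print: ok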

8) Configure the kube-proxy service

cat > /etc/kubernetes/kube-proxy <<EOF
# Log to stderr
KUBE_LOGTOSTDERR="--logtostderr=true"
# Log level
KUBE_LOG_LEVEL="--v=4"
# Override the node name
NODE_HOSTNAME="--hostname-override=192.168.1.12"
# API server address
KUBE_MASTER="--master=http://192.168.1.10:8080"
EOF

9) Create the kube-proxy unit file
Create vim /usr/lib/systemd/system/kube-proxy.service

# add the following:
[Unit]
Description=Kubernetes Proxy
After=network.target
[Service]
EnvironmentFile=-/etc/kubernetes/kube-proxy
ExecStart=/usr/bin/kube-proxy \
${KUBE_LOGTOSTDERR} \
${KUBE_LOG_LEVEL} \
${NODE_HOSTNAME} \
${KUBE_MASTER}
Restart=on-failure
[Install]
WantedBy=multi-user.target

10) Start the kube-proxy service (and enable it at boot)

systemctl daemon-reload && systemctl enable kube-proxy && systemctl restart kube-proxy && systemctl status kube-proxy
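
kube-proxy defaults to iptables mode in this release; once it is running, its chains should appear in the nat table (the rule contents will vary with your services):

iptables -t nat -S KUBE-SERVICES | head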

12. Deploy the Flannel Network (kube-master0)
1) Download and install

wget https://github.com/coreos/flannel/releases/download/v0.11.0/flannel-v0.11.0-linux-amd64.tar.gz
tar zxvf flannel-v0.11.0-linux-amd64.tar.gz
cp flanneld mk-docker-opts.sh /usr/bin

2) Create the flanneld unit file
Create vim /usr/lib/systemd/system/flanneld.service

# add the following:
[Unit]
Description=flanneld overlay address etcd agent
After=network.target
Before=docker.service

[Service]
Type=notify
EnvironmentFile=-/etc/sysconfig/flannel
ExecStart=/usr/bin/flanneld -etcd-endpoints=http://192.168.1.10:2379 $FLANNEL_OPTIONS

[Install]
RequiredBy=docker.service
WantedBy=multi-user.target

3) Start the service

etcdctl set /coreos.com/network/config '{ "Network": "172.16.0.0/16" }'
systemctl daemon-reload && systemctl  restart flanneld && systemctl  enable flanneld && systemctl  status flanneld
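
You can confirm the network range was written to etcd:

etcdctl get /coreos.com/network/config   # should print: { "Network": "172.16.0.0/16" }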

4) Work around flanneld failing to start at boot
Create vim /app/etcdctl.sh

# add the following:
#!/bin/bash
/usr/bin/etcdctl set /coreos.com/network/config '{ "Network": "172.16.0.0/16" }'
service flanneld restart

Edit vim /etc/rc.d/rc.local

# add the following:
/app/etcdctl.sh

Make both files executable

chmod +x /app/etcdctl.sh /etc/rc.d/rc.local

Reboot the server and check the result

[root@kube-master0 ~]# reboot
[root@kube-master0 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:b8:f9:1e brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.10/24 brd 192.168.1.255 scope global noprefixroute ens33
       valid_lft forever preferred_lft forever
    inet6 240e:82:702:b92c:af1a:d297:2453:eee/64 scope global noprefixroute dynamic
       valid_lft 258996sec preferred_lft 172596sec
    inet6 fe80::ef22:2a3b:f85c:1a69/64 scope link noprefixroute
       valid_lft forever preferred_lft forever
3: flannel0: <POINTOPOINT,MULTICAST,NOARP,UP,LOWER_UP> mtu 1472 qdisc pfifo_fast state UNKNOWN group default qlen 500
    link/none
    inet 172.16.55.0/32 scope global flannel0
       valid_lft forever preferred_lft forever
    inet6 fe80::19fa:9361:7995:e98b/64 scope link flags 800
       valid_lft forever preferred_lft forever

13. Deploy the Flannel Network (kube-node1)
1) Download and install

wget https://github.com/coreos/flannel/releases/download/v0.11.0/flannel-v0.11.0-linux-amd64.tar.gz
tar zxvf flannel-v0.11.0-linux-amd64.tar.gz
cp flanneld mk-docker-opts.sh /usr/bin

2) Create the flanneld configuration file
Create vim /etc/kubernetes/flanneld

FLANNEL_OPTIONS="--etcd-endpoints=http://192.168.1.10:2379"

3) Create the flanneld unit file
Create vim /usr/lib/systemd/system/flanneld.service

# add the following:
[Unit]
Description=Flanneld overlay address etcd agent
After=network-online.target network.target
Before=docker.service
[Service]
Type=notify
EnvironmentFile=/etc/kubernetes/flanneld
ExecStart=/usr/bin/flanneld --ip-masq $FLANNEL_OPTIONS
ExecStartPost=/usr/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure
[Install]
WantedBy=multi-user.target

4) Modify the docker unit file
Edit vim /usr/lib/systemd/system/docker.service

Add the following two lines:
EnvironmentFile=/run/flannel/subnet.env
ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS
#ExecStart=/usr/bin/dockerd   # comment out the original ExecStart line

5) Start the services

systemctl daemon-reload && systemctl  restart flanneld && systemctl  enable flanneld && systemctl  status flanneld && systemctl  restart  docker && systemctl  status  docker
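
After flanneld starts, mk-docker-opts.sh should have written the Docker options into /run/flannel/subnet.env (the subnet values will differ on your machine):

cat /run/flannel/subnet.env
# e.g. DOCKER_NETWORK_OPTIONS=" --bip=172.16.3.1/24 --ip-masq=false --mtu=1472"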

6) Check that flannel assigned an IP to its interface

[root@kube-node1 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:e1:e0:4c brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.12/24 brd 192.168.1.255 scope global noprefixroute ens33
       valid_lft forever preferred_lft forever
    inet6 240e:82:702:b92c:30ca:85e4:b3c:f0ce/64 scope global noprefixroute dynamic
       valid_lft 258852sec preferred_lft 172452sec
    inet6 fe80::dd63:6469:5b6b:fce4/64 scope link noprefixroute
       valid_lft forever preferred_lft forever
3: flannel0: <POINTOPOINT,MULTICAST,NOARP,UP,LOWER_UP> mtu 1472 qdisc pfifo_fast state UNKNOWN group default qlen 500
    link/none
    inet 172.16.3.0/32 scope global flannel0
       valid_lft forever preferred_lft forever
    inet6 fe80::d209:78ad:f216:9075/64 scope link flags 800
       valid_lft forever preferred_lft forever
4: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
    link/ether 02:42:f9:89:02:6b brd ff:ff:ff:ff:ff:ff
    inet 172.16.3.1/24 scope global docker0
       valid_lft forever preferred_lft forever

14. Check Node Status from the Master

[root@kube-master0 ~]# kubectl get nodes
NAME         STATUS    ROLES     AGE       VERSION
kube-node1   Ready     <none>    1m        v1.8.15

15. Test the Deployment

Run a test deployment
[root@kube-master0 app]# kubectl run my-alpine --image=alpine --replicas=2 ping www.baidu.com
deployment.apps/my-alpine created
Check the pod status
[root@kube-master0 app]# kubectl get pods
NAME                         READY     STATUS    RESTARTS   AGE
my-alpine-7b44b58b95-f6xjv   1/1       Running   0          28s
my-alpine-7b44b58b95-fnt89   1/1       Running   0          28s
Check the test output
[root@kube-master0 app]# kubectl logs my-alpine-7b44b58b95-f6xjv
PING www.baidu.com (220.181.38.149): 56 data bytes
64 bytes from 220.181.38.149: seq=0 ttl=54 time=5.359 ms
64 bytes from 220.181.38.149: seq=1 ttl=54 time=4.584 ms
64 bytes from 220.181.38.149: seq=2 ttl=54 time=4.553 ms
64 bytes from 220.181.38.149: seq=3 ttl=54 time=8.872 ms
64 bytes from 220.181.38.149: seq=4 ttl=54 time=4.971 ms
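
When finished, remove the test deployment:

kubectl delete deployment my-alpine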
