The topology is as follows:
[Topology diagram]

To manage Docker networking more conveniently, this article uses an Open vSwitch bridge to manage the container network, so that containers on different hosts can reach each other once they start. Open vSwitch is a production-quality, multilayer virtual switch; by building a tunnel between the two hosts, it lets the two internal container networks communicate with each other.
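Since the original figure is not reproduced here, below is a rough sketch of the setup the following steps build (addresses taken from this article's configuration):

    192.168.18.141 (bigdata2)                 192.168.18.142 (node3)
    docker0: 172.17.0.1/16                    docker0: 172.16.0.1/16
         |                                         |
        br0 ============= GRE tunnel ============= br0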

1. Pre-assign the Docker bridge IP range on each host:
18.141:

[root@bigdata2 ~]# cat /etc/sysconfig/docker-network 
# /etc/sysconfig/docker-network
DOCKER_NETWORK_OPTIONS=--bip=172.17.0.1/16

18.142:

[root@bigdata2 ~]# cat /etc/sysconfig/docker-network 
# /etc/sysconfig/docker-network
DOCKER_NETWORK_OPTIONS=--bip=172.16.0.1/16

Note: besides the method above, this can also be configured by editing /etc/docker/daemon.json and then restarting Docker. However, in testing, the two methods cannot be configured at the same time; with both in place, Docker fails to start.
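A minimal sketch of the daemon.json alternative, assuming the same bridge IP as above (again: use either this file or the docker-network file, not both):

[root@bigdata2 ~]# cat /etc/docker/daemon.json
{
    "bip": "172.17.0.1/16"
}
[root@bigdata2 ~]# systemctl restart docker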

In addition, an existing route can be deleted with the following commands:

[root@bigdata2 ~]# route -n
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         192.168.18.1    0.0.0.0         UG    100    0        0 enp3s0
192.168.18.0    0.0.0.0         255.255.255.0   U     100    0        0 enp3s0
192.168.200.0   0.0.0.0         255.255.255.0   U     0      0        0 docker0
[root@bigdata2 ~]# route del -net 192.168.200.0/24 gw 0.0.0.0
[root@bigdata2 ~]# route -n
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         192.168.18.1    0.0.0.0         UG    100    0        0 enp3s0
192.168.18.0    0.0.0.0         255.255.255.0   U     100    0        0 enp3s0
[root@bigdata2 ~]#
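The same deletion can also be done with iproute2, as an equivalent alternative to the route command above:

[root@bigdata2 ~]# ip route del 192.168.200.0/24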

2. Install Open vSwitch (run on both hosts):

[root@bigdata2 ~]# yum install -y gcc make python-devel openssl-devel kernel-devel graphviz kernel-debug-devel autoconf automake rpm-build redhat-rpm-config libtool selinux-policy-devel
[root@bigdata2 ~]# cd /usr/local/src
[root@bigdata2 src]# wget http://openvswitch.org/releases/openvswitch-2.7.0.tar.gz
[root@bigdata2 src]# mkdir -p ~/rpmbuild/SOURCES
[root@bigdata2 src]# cp openvswitch-2.7.0.tar.gz ~/rpmbuild/SOURCES/
[root@bigdata2 src]# cd /root/rpmbuild/SOURCES
[root@bigdata2 SOURCES]#pwd
/root/rpmbuild/SOURCES
[root@bigdata2 SOURCES]# tar -zxvf openvswitch-2.7.0.tar.gz
[root@bigdata2 SOURCES]# sed 's/openvswitch-kmod, //g' openvswitch-2.7.0/rhel/openvswitch.spec > openvswitch-2.7.0/rhel/openvswitch_no_kmod.spec
[root@bigdata2 SOURCES]# rpmbuild -bb --without check openvswitch-2.7.0/rhel/openvswitch_no_kmod.spec
After this completes, new directories are generated under /root/rpmbuild:
[root@bigdata2 rpmbuild]# pwd
/root/rpmbuild
[root@bigdata2 rpmbuild]# ls
BUILD  BUILDROOT  RPMS  SOURCES  SPECS  SRPMS
[root@bigdata2 rpmbuild]#
## After the build, the RPM packages are under the x86_64 directory:
[root@bigdata2 src]# cd ~/rpmbuild/RPMS/x86_64/
[root@bigdata2 x86_64]# ls
openvswitch-2.7.0-1.x86_64.rpm  openvswitch-debuginfo-2.7.0-1.x86_64.rpm  openvswitch-devel-2.7.0-1.x86_64.rpm
[root@bigdata2 x86_64]# yum localinstall -y openvswitch-2.7.0-1.x86_64.rpm
[root@bigdata2 x86_64]# systemctl start openvswitch
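Optionally, enable the service at boot and confirm it is running; on a fresh install, ovs-vsctl show should print just the database UUID and the version string (a quick check, exact output will vary):

[root@bigdata2 x86_64]# systemctl enable openvswitch
[root@bigdata2 x86_64]# ovs-vsctl show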

3. Install the bridge management tool:

[root@bigdata2 x86_64]# yum -y install bridge-utils
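As a quick check, brctl show should now list the docker0 bridge that we attach br0 to in the next step:

[root@bigdata2 x86_64]# brctl show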

4. Deploy the OVS tunnel and routes (perform the following on 18.141).

4.1: Create the tunnel bridge br0 on both hosts and connect them through a GRE tunnel

[root@bigdata2 x86_64]# ovs-vsctl add-br br0
# Note: the IP below is the address of the peer host you are connecting to
[root@bigdata2 x86_64]# ovs-vsctl add-port br0 gre1 -- set interface gre1 type=gre option:remote_ip=192.168.18.142
# Attach br0 to the local docker0 so that container traffic flows through OVS and into the tunnel
[root@bigdata2 x86_64]# brctl addif docker0 br0
[root@bigdata2 x86_64]# ip link set dev br0 up
[root@bigdata2 x86_64]# ip link set dev docker0 up
# Add a route so that traffic for 172.16.0.0/16 is sent out via docker0
[root@bigdata2 x86_64]# ip route add 172.16.0.0/16 dev docker0
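At this point the wiring on 18.141 can be verified (exact output varies): br0 should appear as an interface of docker0, and the gre1 port should show up in the OVS configuration:

[root@bigdata2 x86_64]# brctl show docker0
[root@bigdata2 x86_64]# ovs-vsctl show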

4.2: Do the same on 18.142, just changing the corresponding IPs (the RPM built on 18.141 can be copied over first):

[root@bigdata2 x86_64]# scp openvswitch-2.7.0-1.x86_64.rpm root@192.168.18.142:/usr/local/src/
# Install the RPM that was just copied over, then start the service
[root@node3 src]# yum localinstall -y openvswitch-2.7.0-1.x86_64.rpm
[root@node3 src]# systemctl start openvswitch
[root@node3 src]# ovs-vsctl add-br br0
# Here remote_ip is the address of the peer host, 18.141
[root@node3 src]# ovs-vsctl add-port br0 gre1 -- set interface gre1 type=gre option:remote_ip=192.168.18.141
# Attach br0 to the local docker0 so that container traffic flows through OVS and into the tunnel
[root@node3 src]# brctl addif docker0 br0
[root@node3 src]# ip link set dev br0 up
[root@node3 src]# ip link set dev docker0 up
# Add a route so that traffic for 172.17.0.0/16 is sent out via docker0
[root@node3 src]# ip route add 172.17.0.0/16 dev docker0
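With both sides configured, a quick sanity check (assuming the tunnel is up) is to ping the peer's docker0 address across the tunnel from each host:

[root@bigdata2 x86_64]# ping -c 3 172.16.0.1
[root@node3 src]# ping -c 3 172.17.0.1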

4.3: Check the routing table, and delete all of the routes we added in the previous article:

[root@bigdata2 ~]# route -n
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         192.168.63.254  0.0.0.0         UG    100    0        0 ens33
172.16.0.0      0.0.0.0         255.255.0.0     U     0      0        0 docker0
172.17.0.0      0.0.0.0         255.255.0.0     U     0      0        0 docker0
172.18.0.0      0.0.0.0         255.255.0.0     U     0      0        0 docker_gwbridge
192.168.63.0    0.0.0.0         255.255.255.0   U     100    0        0 ens33

5. To make testing easier, we build an nginx image:

[root@node3 test]# tree
├── default_nginx.conf
├── Dockerfile
└── index.html
##########
[root@node3 test]# cat Dockerfile 
FROM lnmp/nginx:1.0
ADD index.html /web/ 
ADD default_nginx.conf /usr/local/nginx/conf/vhosts/default.conf 
EXPOSE 80 
CMD ["/usr/local/nginx/sbin/nginx"]
##########
[root@node3 test]# cat default_nginx.conf 
server {
    listen       80 default_server;
    server_name  localhost;
    index        index.html;
    root         /web;
}
########
[root@node3 test]# cat index.html 
80

6. Build the image and start containers:

18.142:
[root@node3 test]# docker build -t test/nginx:1.0 .
[root@node3 test]# docker ps
[root@node3 test]# docker run -dit -P test/nginx:1.0
[root@node3 test]# docker ps
CONTAINER ID        IMAGE               COMMAND                  CREATED             STATUS              PORTS                   NAMES
78845f5cb91b        test/nginx:1.0      "/usr/local/nginx/..."   10 minutes ago      Up 10 minutes       0.0.0.0:32768->80/tcp   eager_euler
[root@node3 test]# docker exec -it 78845f5cb91b /bin/bash
[root@78845f5cb91b /]# ifconfig
eth0      Link encap:Ethernet  HWaddr 02:42:AC:10:00:02  
          inet addr:172.16.0.2  Bcast:0.0.0.0  Mask:255.255.0.0
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:72 errors:0 dropped:0 overruns:0 frame:0
          TX packets:35 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0 
          RX bytes:6454 (6.3 KiB)  TX bytes:3970 (3.8 KiB)
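Because the container was started with -P, nginx is also reachable from the host through the published port (32768 here, taken from the docker ps output above):

[root@node3 test]# curl http://127.0.0.1:32768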

Start one on 18.141:

[root@bigdata2 src]# docker run -it lnmp/nginx:1.0 /bin/bash
[root@dcff2de3f060 /]# ifconfig
eth0      Link encap:Ethernet  HWaddr 02:42:AC:11:00:03  
          inet addr:172.17.0.3  Bcast:0.0.0.0  Mask:255.255.0.0
          inet6 addr: fe80::42:acff:fe11:3/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:7 errors:0 dropped:0 overruns:0 frame:0
          TX packets:7 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0 
          RX bytes:578 (578.0 b)  TX bytes:578 (578.0 b)

lo        Link encap:Local Loopback  
          inet addr:127.0.0.1  Mask:255.0.0.0
          inet6 addr: ::1/128 Scope:Host
          UP LOOPBACK RUNNING  MTU:65536  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1 
          RX bytes:0 (0.0 b)  TX bytes:0 (0.0 b)

[root@dcff2de3f060 /]# curl 172.16.0.2
80
[root@dcff2de3f060 /]# curl 172.16.0.2
80
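Thanks to the static route added in step 4.1, the same test also works directly from the 18.141 host, not only from inside a container:

[root@bigdata2 src]# curl 172.16.0.2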

### Troubleshooting: we can check the corresponding OVS state:
[root@node3 test]# ovs-vsctl show
1f6c6cde-0cb2-41da-a1b9-999c80a9512d
    Bridge "br0"
        Port "br0"
            Interface "br0"
                type: internal
        Port "gre1"
            Interface "gre1"
                type: gre
                options: {remote_ip="192.168.18.141"}
    ovs_version: "2.7.0"
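If the cross-host curl fails, one way to confirm that traffic actually enters the tunnel is to capture GRE packets (IP protocol 47) on the physical interface while repeating the test (a debugging sketch; enp3s0 is the interface name from the route output above):

[root@bigdata2 ~]# tcpdump -ni enp3s0 ip proto 47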