Docker Installation and Image Configuration (Common Commands)
I. Installing Docker
Official docs: https://docs.docker.com/engine/install/centos/
# 1. Remove old versions
yum remove docker \
docker-client \
docker-client-latest \
docker-common \
docker-latest \
docker-latest-logrotate \
docker-logrotate \
docker-engine
# 2. Install the required packages
yum install -y yum-utils
# 3. Set up the package repository
yum-config-manager \
--add-repo \
https://download.docker.com/linux/centos/docker-ce.repo # the default repo, hosted overseas
yum-config-manager \
--add-repo \
http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo # Alibaba Cloud mirror
# (Optional) refresh the package index
yum makecache fast
# 4. Install Docker (ce = Community Edition, ee = Enterprise Edition)
yum install docker-ce docker-ce-cli containerd.io
# 5. Start Docker
systemctl start docker
# 6. Verify the installation
docker version
# 7. Run hello-world
docker run hello-world
# 8. Check that the hello-world image now exists locally
docker images
# Uninstalling Docker
# 1. Remove the Docker packages (matches install step 4)
yum remove docker-ce docker-ce-cli containerd.io
# 2. Delete resources (Docker's default working directory is /var/lib/docker)
rm -rf /var/lib/docker
II. Alibaba Cloud Image Accelerator
1. Log in to the Alibaba Cloud console
2. Search for the Container Registry service
3. Configure Docker with the accelerator address it provides (a sketch of that configuration follows below)
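A minimal sketch of what the console's instructions typically look like; the accelerator URL below is a placeholder for your account-specific address:
sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://your-accelerator-id.mirror.aliyuncs.com"]
}
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker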
Common Docker Commands
Help commands
# Docker version info
docker version
# Docker system info
docker info
# Help for a specific command
docker [command] --help
Official reference docs: https://docs.docker.com/reference/
Image commands
docker images
# List all images on the local host
docker images
-a # show all images
-q # show image IDs only
docker search
# Search for an image
docker search image-name
docker pull
# Pull an image; the latest version is used by default
# docker pull image-name[:tag]
[root@porty ~]# docker pull mysql
Using default tag: latest # latest is used when no tag is given
latest: Pulling from library/mysql
6ec7b7d162b2: Pull complete # layered download, the core of docker images
fedd960d3481: Pull complete
7ab947313861: Pull complete
64f92f19e638: Pull complete
3e80b17bff96: Pull complete
014e976799f9: Pull complete
59ae84fee1b3: Pull complete
ffe10de703ea: Pull complete
657af6d90c83: Pull complete
98bfb480322c: Pull complete
9f2c4202ac29: Pull complete
a369b92bfc99: Pull complete
Digest: sha256:365e891b22abd3336d65baefc475b4a9a1e29a01a7b6b5be04367fcc9f373bb7 # digest (signature)
Status: Downloaded newer image for mysql:latest
docker.io/library/mysql:latest # the full image address
# Pull a specific version
[root@porty ~]# docker pull mysql:5.7
5.7: Pulling from library/mysql
6ec7b7d162b2: Already exists
fedd960d3481: Already exists
7ab947313861: Already exists
64f92f19e638: Already exists
3e80b17bff96: Already exists
014e976799f9: Already exists
59ae84fee1b3: Already exists # layered download; layers that already exist are shared
7d1da2a18e2e: Pull complete
301a28b700b9: Pull complete
979b389fc71f: Pull complete
403f729b1bad: Pull complete
Digest: sha256:d4ca82cee68dce98aa72a1c48b5ef5ce9f1538265831132187871b78e768aed1
Status: Downloaded newer image for mysql:5.7
docker.io/library/mysql:5.7
docker rmi
# Remove a specific image by ID
# docker rmi -f image-id (a name also works; separate multiple entries with spaces)
docker rmi -f image-id
# Remove multiple images
docker rmi -f image-id image-id image-id ...
# Remove all images: list every image ID and delete them all
docker rmi -f $(docker images -aq)
Container commands
Note: a container can only be created from an image; pull centos for testing
docker pull centos
Create and start a container
docker run [options] image
# Options
--name="Name" # container name
-d # run in the background (detached)
-it # run interactively and enter the container
-p # publish a container port
-p ip:host-port:container-port
-p host-port:container-port (most common)
-p container-port
container-port
-P # publish all exposed ports to random host ports
# Test: start and enter a container
[root@porty ~]# docker run -it centos /bin/bash
[root@0bb81f60254c /]#
# Exit the container
exit
List running containers
# List running containers
docker ps
-a # include containers that have exited
-n 666 # show the 666 most recently created containers
-q # show container IDs only
Exit a container
# Stop the container and exit
exit
# Exit without stopping the container
Ctrl + P + Q
Remove containers
# Remove the specified container; a running container cannot be removed unless forced with -f
docker rm container-id
# Remove all containers
docker rm -f $(docker ps -aq)
# Remove all containers
docker ps -aq | xargs docker rm -f
Start and stop containers
# Start a container
docker start container-id
# Restart a container
docker restart container-id
# Stop a container
docker stop container-id
# Force-kill a container
docker kill container-id
Other common commands
Run containers in the background
# Create a container in the background; with no foreground process it stops as soon as it starts
docker run -d image-id
# Create a background container that keeps running
docker run -itd image-id
View logs
docker logs container-id
-t # show timestamps
-f # follow the log output
-n10 # show the latest 10 lines
docker logs -tfn10 container-id
View processes inside a container
docker top container-id
# Example
[root@porty ~]# docker top b
UID PID PPID C STIME TTY TIME CMD
root 25250 25230 0 16:22 ? 00:00:00 /bin/bash
View container metadata
docker inspect container-id
Enter a running container
# Enter a running container interactively; this opens a new terminal inside the container
docker exec -it container-id /bin/bash
# Attach to the terminal the container is already running; no new terminal is started
docker attach container-id
Copy files from a container to the host
docker cp container-id:source-path host-destination
# Example
docker cp b:/porty.java /root/
Installing Nginx with Docker
# 1. Search for the nginx image
docker search nginx
# 2. Pull the nginx image
docker pull nginx
# 3. Create an nginx container and map the port
[root@porty var]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
nginx latest ae2feff98a0c 2 days ago 133MB
centos latest 300e315adb2f 10 days ago 209MB
[root@porty var]# docker run -d --name nginx01 -p 3344:80 nginx
13389d744c10d169c67bbb11b28f1ea2caebe2d9d1b18d8066010fb1159b4522
[root@porty var]# docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
13389d744c10 nginx "/docker-entrypoint.…" 5 seconds ago Up 3 seconds 0.0.0.0:3344->80/tcp nginx01
# 4. Test
[root@porty var]# curl localhost:3344
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
body {
width: 35em;
margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif;
}
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>
<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>
<p><em>Thank you for using nginx.</em></p>
</body>
</html>
[root@porty var]#
Editing a container's config files from outside the container
-v (volume mounting; see the sketch below)
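A minimal sketch, assuming a host directory /root/nginx/html exists: bind-mount it over nginx's web root so pages edited on the host take effect inside the container without docker exec (the container name nginx02 and host port 3345 are illustrative):
docker run -d --name nginx02 -p 3345:80 -v /root/nginx/html:/usr/share/nginx/html nginx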
Installing Tomcat with Docker
# Official quick-test usage; --rm removes the container as soon as it exits
docker run -it --rm tomcat:9.0
docker run -it --rm -p 8888:8080 tomcat:9.0
# Pull the tomcat image
docker pull tomcat:9.0
# Start a container in the background
docker run -d --name=tomcat01 -p 8888:8080 image-id
# Test
[root@porty ~]# curl localhost:8888
<!doctype html><html lang="en"><head><title>HTTP Status 404 – Not Found</title><style type="text/css">body {font-family:Tahoma,Arial,sans-serif;} h1, h2, h3, b {color:white;background-color:#525D76;} h1 {font-size:22px;} h2 {font-size:16px;} h3 {font-size:14px;} p {font-size:12px;} a {color:black;} .line {height:1px;background-color:#525D76;border:none;}</style></head><body><h1>HTTP Status 404 – Not Found</h1><hr class="line" /><p><b>Type</b> Status Report</p><p><b>Description</b> The origin server did not find a current representation for the target resource or is not willing to disclose that one exists.</p><hr class="line" /><h3>Apache Tomcat/9.0.41</h3></body></html>
Installing ElasticSearch and Kibana with Docker (plus configuration)
# elasticsearch is very memory-hungry
# --net somenetwork  network configuration
# Start elasticsearch
docker run -d --name elasticsearch -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" elasticsearch:7.6.2
# After it starts, the Linux host can become very sluggish; check resource usage with docker stats
# On low-memory machines, set a memory limit for elasticsearch before starting it
# Normally you would edit the config file; here we override it with -e environment settings instead
docker run -d --name elasticsearch -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" -e ES_JAVA_OPTS="-Xms64m -Xmx512m" elasticsearch:7.6.2
# Install kibana; its version must match the elasticsearch version
# --net somenetwork  network configuration
docker run -d --name kibana -p 5601:5601 kibana:7.6.2
# Because container environments are isolated from each other, kibana cannot reach elasticsearch directly; traffic has to go over an internal network
# To let kibana connect to elasticsearch, put both containers on the same network
# Create a network
docker network create somenetwork
# Start the elasticsearch container with a memory limit, on the network just created
docker run -d --name elasticsearch --net somenetwork -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" -e ES_JAVA_OPTS="-Xms64m -Xmx512m" elasticsearch:7.6.2
# Start the kibana container on the same network
docker run -d --name kibana --net somenetwork -p 5601:5601 kibana:7.6.2
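# A quick check (sketch): once elasticsearch finishes starting, it should answer on port 9200
curl localhost:9200
# expect a JSON response with the cluster name and version; kibana is then reachable in a browser at server-ip:5601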
Visualization
- portainer
# Start the portainer dashboard container
docker run -d -p 8088:9000 --restart=always -v /var/run/docker.sock:/var/run/docker.sock --privileged=true portainer/portainer
# Open the dashboard; replace localhost with your server's address
localhost:8088
- Rancher (CI/CD)
Docker Images
What is an image
An image is a lightweight, executable, self-contained software package that bundles a runtime environment together with the software developed for it. It contains everything needed to run that software: code, runtime, libraries, environment variables, and configuration files.
How Docker loads images
Docker images are loaded in layers, based on UnionFS (a union file system).
bootfs (boot file system): the bootloader plus the kernel. Once booting finishes, control of the system is handed to the kernel and the bootloader is unloaded. This part is essentially identical across systems and is shared.
rootfs (root file system): the root of a distribution such as Ubuntu or CentOS, with its own file system layout. It differs between distributions and is not shared.
If two images are loaded, their bootfs does not need to be handled at all, because Docker reuses the host's kernel; loading an image only touches rootfs. And if some layers needed by the second image were already pulled for the first image, they are not loaded again.
Understanding layers
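One way to see an image's layers is docker image inspect; a small sketch, using the mysql:5.7 image pulled earlier:
docker image inspect --format '{{json .RootFS.Layers}}' mysql:5.7
# prints the list of layer digests that make up the image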
Committing an image
Command
# Commit a container as a new image
docker commit
# Options
docker commit -m="commit message" -a="author" container-id target-image-name:[tag]
Test
# Start a tomcat container; by default its webapps folder is empty
# Copy everything from the webapps.dist folder into webapps
# Commit this container as a new image
docker commit -a="porty" -m="add some default pages." 00ac20eb7e81 porty_tomcat:1.0
# After committing, the new image is in the local image library and shows up in docker images
docker images
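# A small follow-up sketch: run a container from the committed image (the name and host port are illustrative)
docker run -d --name my_tomcat01 -p 8889:8080 porty_tomcat:1.0
curl localhost:8889 # should now return the default Tomcat page instead of the 404 seen with the empty webapps folder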
Container Data Volumes
What is a container data volume
Docker's philosophy:
Package the application and its environment into a single image!
A container data volume is, at its core, the mounting of directories between container and host.
Using data volumes
Method 1: mount directly on the command line with -v
docker run -it -v host-directory:container-directory
# Test
docker run -it -v /root/porty:/home centos /bin/bash
# Anything created in the directory on either side (host or container) is synced to the other side.
# Even if the container is stopped, changes made on the host in the mounted directory are still synced to the container.
# Even if the container is removed, files in the mounted directory are not lost.
Hands-on: MySQL
# MySQL data is important, so its directories need to be mounted
# When creating a MySQL container, the root password must be set: -e MYSQL_ROOT_PASSWORD=my-secret-pw
# The -e option sets environment variables
# Official example: docker run --name some-mysql -e MYSQL_ROOT_PASSWORD=my-secret-pw -d mysql:tag
docker run -d -p 3310:3306 -v /root/mysql/conf:/etc/mysql/conf.d -v /root/mysql/data:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=123456 --name mysql01 image-id
# After it starts, connect to the server with a remote client on port 3310; if that succeeds, the MySQL container was created and is running
# Then check whether the mounted directories contain the corresponding data files
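# A minimal check (sketch): the bind-mounted data directory on the host should now contain MySQL's files
ls /root/mysql/data
# expect entries such as ibdata1 and the mysql/ and performance_schema/ directories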
Named and anonymous mounts
# Anonymous mount
-v container-path
docker run -d -P --name nginx01 -v /etc/nginx nginx
# List all volumes
docker volume ls
DRIVER VOLUME NAME
local 8efbbdb6edc2e189a8412ad9673a9ccf044f2d9f16cd65b2729078d87d5ee0dd
# Named mount
-v volume-name:container-path
docker run -d -P --name nginx02 -v juming-nginx:/etc/nginx nginx
docker volume ls
DRIVER VOLUME NAME
local juming-nginx
# Inspect a volume
docker volume inspect volume-name
docker volume inspect juming-nginx
[
{
"CreatedAt": "2020-12-22T10:27:17+08:00",
"Driver": "local",
"Labels": null,
"Mountpoint": "/var/lib/docker/volumes/juming-nginx/_data",
"Name": "juming-nginx",
"Options": null,
"Scope": "local"
}
]
# When no host directory is specified, every volume lives under /var/lib/docker/volumes/volume-name/_data (here: juming-nginx)
# Extras
# ro = read only: read-only inside the container; changes can only be made from the host
docker run -d -v juming-nginx:/etc/nginx:ro nginx
# rw = read write (the default)
docker run -d -v juming-nginx:/etc/nginx:rw nginx
First Look at Dockerfile
A Dockerfile is the build file used to construct a Docker image: a script of instructions.
# Build an image from a Dockerfile
# Create a file named dockerfile1 and edit it
FROM centos
VOLUME ["volume1","volume2"]
CMD echo "------end------"
CMD /bin/bash
# Build the image; the trailing . must not be omitted
[root@porty docker-test-volume]# docker build -f /root/docker-test-volume/dockerfile1 -t porty-centos:1.0 .
Sending build context to Docker daemon 2.048kB
Step 1/4 : FROM centos
---> 300e315adb2f
Step 2/4 : VOLUME ["volume1","volume2"]
---> Running in 6b7f951b1727
Removing intermediate container 6b7f951b1727
---> 27c4df35d87c
Step 3/4 : CMD echo "------end------"
---> Running in 224c593c89f8
Removing intermediate container 224c593c89f8
---> 5757bc3bc547
Step 4/4 : CMD /bin/bash
---> Running in 51414801f7a0
Removing intermediate container 51414801f7a0
---> abab4447f260
Successfully built abab4447f260
Successfully tagged porty-centos:1.0
[root@porty docker-test-volume]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
porty-centos 1.0 abab4447f260 About a minute ago 209MB
nginx latest ae2feff98a0c 6 days ago 133MB
mysql 5.7 697daaecf703 10 days ago 448MB
centos latest 300e315adb2f 2 weeks ago 209MB
# Create a container from this image, enter it, and check that the two volume mount directories exist under the root directory
# On the host, docker inspect container-id shows the container's data, including the volume mount mappings
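# A small sketch: print only the mount section of the metadata (container-id is a placeholder)
docker inspect --format '{{json .Mounts}}' container-id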
Data volume containers
# Test sharing data between multiple containers
# Create container docker01 (the data volume container)
docker run -it --name docker01 abab4447f260
# Create docker02 and inherit the volumes from docker01
docker run -it --name docker02 --volumes-from docker01 abab4447f260
# Create docker03 and inherit the volumes from docker01
docker run -it --name docker03 --volumes-from docker01 abab4447f260
# The files in their volume directories are now shared
# Even if the parent container docker01 is removed, the other two can still access the files in the shared directory and keep syncing
Syncing data between multiple MySQL containers
# mysql01
docker run -d -p 3310:3306 -v /root/mysql/conf:/etc/mysql/conf.d -v /root/mysql/data:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=123456 --name mysql01 image-id
# mysql02 (a second instance; in the spirit of this section it could also be created with --volumes-from mysql01 instead of repeating the bind mounts)
docker run -d -p 3311:3306 -v /root/mysql/conf:/etc/mysql/conf.d -v /root/mysql/data:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=123456 --name mysql02 image-id
DockerFile
DockerFile introduction
A DockerFile is the file used to build a Docker image: a script of commands and parameters!
Build steps:
1. Write a Dockerfile
2. Build it into an image with docker build
3. Create containers from the image with docker run
4. Publish the image with docker push (DockerHub, Alibaba Cloud image registry)
DockerFile build process
Notes:
1. Every keyword (instruction) must be uppercase
2. Instructions are executed in order, top to bottom
3. # marks a comment
4. Each instruction creates and commits a new image layer!
DockerFile instructions
FROM # base image; everything is built on top of this
MAINTAINER # who wrote the image: name + email
RUN # commands to run while building the image
ADD # add content into the image, e.g. the tomcat tarball for a tomcat image (local archives are unpacked automatically)
WORKDIR # the image's working directory
VOLUME # directories to expose as mount points
EXPOSE # port to expose
CMD # command to run when the container starts; only the last CMD takes effect, and it can be replaced at run time
ENTRYPOINT # command to run when the container starts; run-time arguments are appended to it
ONBUILD # trigger instruction; runs when this image is used as the base of another build
COPY # like ADD, copies files into the image
ENV # set environment variables at build time
Hands-on test
Build a custom centos image
# Write the dockerfile
cat mydockerfile-centos
FROM centos
MAINTAINER porty<1527957705@qq.com>
ENV MYPATH /usr/local
WORKDIR $MYPATH
RUN yum -y install vim
RUN yum -y install net-tools
EXPOSE 80
CMD echo $MYPATH
CMD echo "------end------"
CMD /bin/bash
# Build the image
docker build -f mydockerfile-centos -t mycentos:0.1 .
# Build finished
Successfully built 282d2b0414a8
Successfully tagged mycentos:0.1
Difference between CMD and ENTRYPOINT
CMD # command to run when the container starts; only the last CMD takes effect, and it can be replaced at run time
ENTRYPOINT # command to run when the container starts; run-time arguments are appended to it
# CMD test
# Write the dockerfile: dockerfile-test-cmd
FROM centos
CMD ["ls","-a"]
# Build the image
docker build -f dockerfile-test-cmd -t cmdtest .
# Run the image; ls -a lists everything in the directory
docker run cmdtest
.
..
.dockerenv
bin
dev
etc
home
lib
lib64
lost+found
media
mnt
opt
proc
root
run
sbin
srv
sys
tmp
usr
var
# If we append -l when running the container, it fails
docker run cmdtest -l
docker: Error response from daemon: OCI runtime create failed: container_linux.go:370: starting container process caused: exec: "-l": executable file not found in $PATH: unknown.
# With CMD, -l replaces the ["ls","-a"] command entirely, and -l is not an executable, so it fails!
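# A short sketch: to add flags under CMD you have to repeat the full command, which then replaces CMD
docker run cmdtest ls -al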
# ENTRYPOINT test
# Write the dockerfile: dockerfile-test-entrypoint
FROM centos
ENTRYPOINT ["ls","-a"]
# Build the image
docker build -f dockerfile-test-entrypoint -t entrypointtest .
# Run the image; ls -a lists everything in the directory
docker run entrypointtest
.
..
.dockerenv
bin
dev
etc
home
lib
lib64
lost+found
media
mnt
opt
proc
root
run
sbin
srv
sys
tmp
usr
var
# With ENTRYPOINT, appending -l when running the container simply appends it to the command
docker run entrypointtest -l
total 56
drwxr-xr-x 1 root root 4096 Dec 22 08:08 .
drwxr-xr-x 1 root root 4096 Dec 22 08:08 ..
-rwxr-xr-x 1 root root 0 Dec 22 08:08 .dockerenv
lrwxrwxrwx 1 root root 7 Nov 3 15:22 bin -> usr/bin
drwxr-xr-x 5 root root 340 Dec 22 08:08 dev
drwxr-xr-x 1 root root 4096 Dec 22 08:08 etc
drwxr-xr-x 2 root root 4096 Nov 3 15:22 home
lrwxrwxrwx 1 root root 7 Nov 3 15:22 lib -> usr/lib
lrwxrwxrwx 1 root root 9 Nov 3 15:22 lib64 -> usr/lib64
drwx------ 2 root root 4096 Dec 4 17:37 lost+found
drwxr-xr-x 2 root root 4096 Nov 3 15:22 media
drwxr-xr-x 2 root root 4096 Nov 3 15:22 mnt
drwxr-xr-x 2 root root 4096 Nov 3 15:22 opt
dr-xr-xr-x 100 root root 0 Dec 22 08:08 proc
dr-xr-x--- 2 root root 4096 Dec 4 17:37 root
drwxr-xr-x 11 root root 4096 Dec 4 17:37 run
lrwxrwxrwx 1 root root 8 Nov 3 15:22 sbin -> usr/sbin
drwxr-xr-x 2 root root 4096 Nov 3 15:22 srv
dr-xr-xr-x 13 root root 0 Oct 21 07:30 sys
drwxrwxrwt 7 root root 4096 Dec 4 17:37 tmp
drwxr-xr-x 12 root root 4096 Dec 4 17:37 usr
drwxr-xr-x 20 root root 4096 Dec 4 17:37 var
Hands-on: a Tomcat image
# 1. Prepare the tomcat and jdk archives
ll
total 150964
-rw-r--r-- 1 root root 11442169 Dec 3 20:42 apache-tomcat-9.0.41.tar.gz
-rw-r--r-- 1 root root 143142634 Dec 15 07:32 jdk-8u271-linux-x64.tar.gz
# 2. Write the dockerfile; if the file is named Dockerfile, docker build does not need -f to point to it
FROM centos
MAINTAINER porty<123456@qq.com>
COPY readme.txt /usr/local/readme.txt
ADD apache-tomcat-9.0.41.tar.gz /usr/local/
ADD jdk-8u271-linux-x64.tar.gz /usr/local/
RUN yum -y install vim
ENV MYPATH /usr/local
WORKDIR $MYPATH
ENV JAVA_HOME /usr/local/jdk1.8.0_271
ENV CLASSPATH $JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
ENV CATALINA_HOME /usr/local/apache-tomcat-9.0.41
ENV CATALINA_BASE /usr/local/apache-tomcat-9.0.41
ENV PATH $PATH:$JAVA_HOME/bin:$CATALINA_HOME/lib:$CATALINA_HOME/bin
EXPOSE 8080
CMD /usr/local/apache-tomcat-9.0.41/bin/startup.sh && tail -F /usr/local/apache-tomcat-9.0.41/logs/catalina.out
# 3. Build the image; since the dockerfile here is named Dockerfile, -f is not needed
docker build -t porty_tomcat .
# 4. Check the image
docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
porty_tomcat latest 69e66cedbeb9 About a minute ago 639MB
# 5. Start a container from the image and mount directories
docker run -d -p 8080:8080 --name porty_tomcat01 -v /root/dockerfiles/tomcat/test:/usr/local/apache-tomcat-9.0.41/webapps/test -v /root/dockerfiles/tomcat/logs:/usr/local/apache-tomcat-9.0.41/logs porty_tomcat
# 6. Access test; the Tomcat page is returned
curl localhost:8080
# Test deploying a project
# index.jsp
<%@ page language="java" contentType="text/html; charset=UTF-8"
pageEncoding="UTF-8"%>
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>porty</title>
</head>
<body>
Hello World!<br/>
<%
System.out.println("I'm porty!");
%>
</body>
</html>
# WEB-INF/web.xml
<?xml version="1.0" encoding="UTF-8"?>
<web-app
version="3.0"
xmlns="http://java.sun.com/xml/ns/javaee"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://java.sun.com/xml/ns/javaee http://java.sun.com/xml/ns/javaee/web-app_3_0.xsd">
</web-app>
# Access test: ip:8080/test
Publishing Your Own Image
DockerHub
1. Register an account at https://hub.docker.com
2. Make sure the account works
3. Push the image to DockerHub from the server
# The login command
[root@porty ~]# docker login --help
Usage: docker login [OPTIONS] [SERVER]
Log in to a Docker registry.
If no server is specified, the default is defined by the daemon.
Options:
-p, --password string Password
--password-stdin Take the password from stdin
-u, --username string Username
# Log in
[root@porty ~]# docker login -u username -p password
WARNING! Using --password via the CLI is insecure. Use --password-stdin.
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store
Login Succeeded
# Push the image; include a version tag whenever possible
# 1. Create a repository on DockerHub, e.g. porty_hub/porty_tomcat
# 2. Tag the local image with the repository's name
docker tag SOURCE_IMAGE[:TAG] TARGET_IMAGE[:TAG]
docker tag porty_tomcat porty_hub/porty_tomcat
# 3. Upload it to the matching DockerHub repository with docker push; the output below shows a push in progress
[root@porty ~]# docker push porty_hub/porty_tomcat
Using default tag: latest
The push refers to repository [docker.io/porty_hub/porty_tomcat]
c23b49b4a91b: Pushing [================> ] 18.96MB/58.02MB
a1fce1a8dd54: Pushing [==> ] 19.68MB/355.3MB
35dddb931ac2: Pushing [======================> ] 7.154MB/15.83MB
f084de1cda6b: Pushed
2653d992f4ef: Pushing [=> ] 5.471MB/209.3MB
# The push is also performed layer by layer, following the image's layers
Publishing to the Alibaba Cloud Container Registry
1. Log in to Alibaba Cloud
2. Find the Container Registry service
3. Create a namespace
4. Create an image repository
5. Open the repository to view its details
# If you logged in to DockerHub earlier, log out of it first
docker logout
# Log in to the Alibaba Cloud registry; the password is the registry password, which can be reset on the access credentials page if forgotten
[root@porty ~]# docker login --username=18923512289 registry.cn-beijing.aliyuncs.com
Password:
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store
Login Succeeded
# Rename (tag) the image as instructed on the repository's page
# docker tag [ImageId] registry.cn-beijing.aliyuncs.com/porty/porty_tomcat:[version]
docker tag ae2feff98a0c registry.cn-beijing.aliyuncs.com/porty/porty_tomcat:1.0
# Push the image as instructed on the repository's page
# docker push registry.cn-beijing.aliyuncs.com/porty/porty_tomcat:[version]
[root@porty ~]# docker push registry.cn-beijing.aliyuncs.com/porty/porty_tomcat:1.0
The push refers to repository [registry.cn-beijing.aliyuncs.com/porty/porty_tomcat]
4eaf0ea085df: Pushed
2c7498eef94a: Pushed
7d2b207c2679: Pushed
5c4e5adc71a8: Pushing [======> ] 8.157MB/63.7MB
87c8a1d8f54f: Pushing [=====> ] 7.617MB/69.23MB
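# A small sketch: once the push finishes, the image can be pulled back on any machine that is logged in to the registry
docker pull registry.cn-beijing.aliyuncs.com/porty/porty_tomcat:1.0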
Docker Networking
Docker networking: docker0
# Check the network interfaces
[root@porty ~]# ip addr
# The local loopback address
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
# The Alibaba Cloud private network address
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:16:3e:2c:dc:eb brd ff:ff:ff:ff:ff:ff
inet 172.24.129.136/20 brd 172.24.143.255 scope global dynamic eth0
valid_lft 309914003sec preferred_lft 309914003sec
# The docker0 address
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
link/ether 02:42:8b:db:ca:f1 brd ff:ff:ff:ff:ff:ff
inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
valid_lft forever preferred_lft forever
# Three interfaces, three network environments
# Question: if the server runs two containers, tomcat and mysql, what addresses do they rely on to talk to each other?
# Test
# Create a tomcat container in the background
docker run -d -P --name tomcat01 tomcat
# Run ip addr inside the tomcat container to get its network info; eth0@if186 is assigned by Docker
[root@porty ~]# docker exec -it tomcat01 ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
185: eth0@if186: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
link/ether 02:42:ac:11:00:02 brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet 172.17.0.2/16 brd 172.17.255.255 scope global eth0
valid_lft forever preferred_lft forever
# Test whether the host can ping the address Docker assigned to the container --> it can
[root@porty ~]# ping 172.17.0.2
PING 172.17.0.2 (172.17.0.2) 56(84) bytes of data.
64 bytes from 172.17.0.2: icmp_seq=1 ttl=64 time=0.094 ms
64 bytes from 172.17.0.2: icmp_seq=2 ttl=64 time=0.061 ms
# Every time a Docker container starts, Docker assigns it an IP
# As soon as Docker is installed there is a docker0 interface, built on veth-pair technology
# Run ip addr on the host again
[root@porty ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:16:3e:2c:dc:eb brd ff:ff:ff:ff:ff:ff
inet 172.24.129.136/20 brd 172.24.143.255 scope global dynamic eth0
valid_lft 309912919sec preferred_lft 309912919sec
3: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
link/ether 02:42:8b:db:ca:f1 brd ff:ff:ff:ff:ff:ff
inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
valid_lft forever preferred_lft forever
# A new interface has appeared, and it is exactly the one Docker created when the tomcat container started
186: vethae8cfd2@if185: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state UP group default
link/ether 6a:c1:4b:58:63:ce brd ff:ff:ff:ff:ff:ff link-netnsid 0
# Start another tomcat container to test
docker run -d -P --name tomcat02 tomcat
# Check tomcat02's interfaces
[root@porty ~]# docker exec -it tomcat02 ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
189: eth0@if190: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
link/ether 02:42:ac:11:00:04 brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet 172.17.0.4/16 brd 172.17.255.255 scope global eth0
valid_lft forever preferred_lft forever
# Back on the host, ip addr now shows yet another interface, matching the one Docker assigned to the tomcat02 container
190: vethb92e282@if189: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state UP group default
link/ether f6:19:b4:4b:ef:73 brd ff:ff:ff:ff:ff:ff link-netnsid 2
# When a container is removed, its corresponding veth pair disappears
# These container interfaces always come in pairs
# veth-pair: pairs of virtual device interfaces, one end attached to the protocol stack and the two ends attached to each other
# Because of this, veth pairs act as bridges that can connect all kinds of virtual network devices
# Testing shows that the tomcat01 container can ping the tomcat02 container (sketch below)
# Containers can ping each other, but not directly: the host's docker0 acts as the bridge between them
# When running a container, --net selects which network (which "router") it uses
# If none is specified, docker0 is the default router, and Docker assigns each container an available IP on it
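# A sketch of that test, using tomcat02's IP from the ip addr output above
docker exec -it tomcat01 ping 172.17.0.4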
--link (not recommended)
Consider a scenario: we wrote a microservice with database url=ip:, and the database IP changes while the project keeps running. We would like to handle this by reaching the container by name instead of by IP.
# Without an explicit link between containers, they cannot ping each other by container name (e.g. tomcat01 and tomcat02)
# If a link exists, e.g. tomcat03 was created with --link tomcat02, then tomcat03 can ping tomcat02 by name. Note the link is one-way: tomcat02 still cannot ping tomcat03 by name
[root@porty ~]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
mysql 5.7 f07dfa83b528 37 hours ago 448MB
tomcat latest feba8d001e3f 5 days ago 649MB
[root@porty ~]# docker run -d -P --name tomcat01 tomcat
98b8061f69765d839b32e86f8198b922dc84e3bae5cee2451d46f34761054449
[root@porty ~]# docker run -d -P --name tomcat02 tomcat
188ec57af07fe81f80f49b46a85225ae588a9acbc24c4b916d876b9ab1ff3aaf
[root@porty ~]# docker exec -it tomcat01 ping tomcat02
ping: tomcat02: Name or service not known
[root@porty ~]# docker run -d -P --name tomcat03 --link tomcat02 tomcat
50514c5a4785584ad060033876a24ad5a83f253d6cba910c87b32d6f8c4fb27f
[root@porty ~]# docker exec -it tomcat03 ping tomcat02
PING tomcat02 (172.17.0.3) 56(84) bytes of data.
64 bytes from tomcat02 (172.17.0.3): icmp_seq=1 ttl=64 time=0.097 ms
64 bytes from tomcat02 (172.17.0.3): icmp_seq=2 ttl=64 time=0.068 ms
# --link simply writes tomcat02's IP, container name, and container ID into tomcat03's /etc/hosts, where the name can later be resolved
[root@porty ~]# docker exec -it tomcat03 cat /etc/hosts
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
172.17.0.3 tomcat02 188ec57af07f
172.17.0.4 50514c5a4785
Custom networks
docker network ls # list all Docker networks
Network modes
bridge: bridged networking (Docker's default)
none: no network configured
host: share the host's network
container: share another container's network (rarely used)
Test
# The earlier run commands did not configure a network, so they default to bridge mode (--net bridge)
docker run -d -P --name tomcat01 --net bridge tomcat
# docker0 characteristics: it is the default, and container names cannot be resolved on it; --link can work around that, but it is clumsy
# Custom network
# docker network create --driver driver(bridge by default) --subnet 192.168.0.0/16 --gateway 192.168.0.1 network-name
[root@porty ~]# docker network create --driver bridge --subnet 192.168.0.0/16 --gateway 192.168.0.1 mynet
b9d0ebd8a422e6f78c0738ccf10f639c2d9b35cece9a81dcce37ff6ec6932695
[root@porty ~]# docker network ls
NETWORK ID NAME DRIVER SCOPE
2cdff7f9bea0 bridge bridge local
c9a6c17a57bf host host local
b9d0ebd8a422 mynet bridge local
c9450aafacd8 none null local
# Inspect the network
[root@porty ~]# docker network inspect b9d
[
{
"Name": "mynet",
"Id": "b9d0ebd8a422e6f78c0738ccf10f639c2d9b35cece9a81dcce37ff6ec6932695",
"Created": "2020-12-23T19:40:20.161584781+08:00",
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": {},
"Config": [
{
"Subnet": "192.168.0.0/16",
"Gateway": "192.168.0.1"
}
]
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {},
"Options": {},
"Labels": {}
}
]
# Test: run two tomcat containers on the same custom network
[root@porty ~]# docker run -d -P --name tomcat01 --net mynet tomcat
a117be620c920e31da07ea6cefa9bbf030d1eed269c2585fb734cda25c035ddd
[root@porty ~]# docker run -d -P --name tomcat02 --net mynet tomcat
487367e7bfdb1eea74c0532bf3b0b6d4b82f5c8cc9c08d7a854734bb5481908b
[root@porty ~]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
487367e7bfdb tomcat "catalina.sh run" 2 seconds ago Up 2 seconds 0.0.0.0:49164->8080/tcp tomcat02
a117be620c92 tomcat "catalina.sh run" 7 seconds ago Up 6 seconds 0.0.0.0:49163->8080/tcp tomcat01
[root@porty ~]# docker exec -it tomcat01 ping tomcat02
PING tomcat02 (192.168.0.3) 56(84) bytes of data.
64 bytes from tomcat02.mynet (192.168.0.3): icmp_seq=1 ttl=64 time=0.057 ms
64 bytes from tomcat02.mynet (192.168.0.3): icmp_seq=2 ttl=64 time=0.060 ms
^C
--- tomcat02 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1ms
rtt min/avg/max/mdev = 0.057/0.058/0.060/0.007 ms
[root@porty ~]# docker exec -it tomcat02 ping tomcat01
PING tomcat01 (192.168.0.2) 56(84) bytes of data.
64 bytes from tomcat01.mynet (192.168.0.2): icmp_seq=1 ttl=64 time=0.039 ms
64 bytes from tomcat01.mynet (192.168.0.2): icmp_seq=2 ttl=64 time=0.057 ms
^C
--- tomcat01 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 2ms
rtt min/avg/max/mdev = 0.039/0.048/0.057/0.009 ms
# The two containers can communicate with each other by container name
# Inspecting the custom network again shows both containers listed under Containers; the custom network maintains the name-to-address mapping
[root@porty ~]# docker network inspect mynet
[
{
"Name": "mynet",
"Id": "b9d0ebd8a422e6f78c0738ccf10f639c2d9b35cece9a81dcce37ff6ec6932695",
"Created": "2020-12-23T19:40:20.161584781+08:00",
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": {},
"Config": [
{
"Subnet": "192.168.0.0/16",
"Gateway": "192.168.0.1"
}
]
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {
"487367e7bfdb1eea74c0532bf3b0b6d4b82f5c8cc9c08d7a854734bb5481908b": {
"Name": "tomcat02",
"EndpointID": "ec48cc82e6cc909b567d7c3d9364e230ee506f15d27102dab24c15786450fe19",
"MacAddress": "02:42:c0:a8:00:03",
"IPv4Address": "192.168.0.3/16",
"IPv6Address": ""
},
"a117be620c920e31da07ea6cefa9bbf030d1eed269c2585fb734cda25c035ddd": {
"Name": "tomcat01",
"EndpointID": "64a98dcd903c9a87b220489c0ff601a3c005cbd6d4a082d9d94c9cabbfc4f9f6",
"MacAddress": "02:42:c0:a8:00:02",
"IPv4Address": "192.168.0.2/16",
"IPv6Address": ""
}
},
"Options": {},
"Labels": {}
}
]
# Benefits:
# Giving each service cluster its own custom network keeps the clusters isolated, secure, and healthy
Connecting networks
# Run two more tomcat containers, this time on the default network
docker run -d -P --name tomcat03 tomcat
docker run -d -P --name tomcat04 tomcat
# Connect a container to a network
[root@porty ~]# docker network connect --help
Usage: docker network connect [OPTIONS] NETWORK CONTAINER
Connect a container to a network
Options:
--alias strings Add network-scoped alias for the container
--driver-opt strings driver options for the network
--ip string IPv4 address (e.g., 172.30.100.104)
--ip6 string IPv6 address (e.g., 2001:db8::33)
--link list Add link to another container
--link-local-ip strings Add a link-local address for the container
# Test: connect the tomcat03 container to the custom network mynet, then use tomcat03 to reach the other IPs on mynet
docker network connect mynet tomcat03
# Inspect mynet again: the tomcat03 container has been added to the network's container list
[root@porty ~]# docker network inspect mynet
[
{
"Name": "mynet",
"Id": "b9d0ebd8a422e6f78c0738ccf10f639c2d9b35cece9a81dcce37ff6ec6932695",
"Created": "2020-12-23T19:40:20.161584781+08:00",
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": {},
"Config": [
{
"Subnet": "192.168.0.0/16",
"Gateway": "192.168.0.1"
}
]
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {
"487367e7bfdb1eea74c0532bf3b0b6d4b82f5c8cc9c08d7a854734bb5481908b": {
"Name": "tomcat02",
"EndpointID": "ec48cc82e6cc909b567d7c3d9364e230ee506f15d27102dab24c15786450fe19",
"MacAddress": "02:42:c0:a8:00:03",
"IPv4Address": "192.168.0.3/16",
"IPv6Address": ""
},
"a117be620c920e31da07ea6cefa9bbf030d1eed269c2585fb734cda25c035ddd": {
"Name": "tomcat01",
"EndpointID": "64a98dcd903c9a87b220489c0ff601a3c005cbd6d4a082d9d94c9cabbfc4f9f6",
"MacAddress": "02:42:c0:a8:00:02",
"IPv4Address": "192.168.0.2/16",
"IPv6Address": ""
},
"a449c360468adaa6722cbf1a70d285867f817149e1f284346108c73733f37bba": {
"Name": "tomcat03",
"EndpointID": "557facc1cb7da810a9241ba9552463eb6eb50ce0bc14ff87551f96247455c0eb",
"MacAddress": "02:42:c0:a8:00:04",
"IPv4Address": "192.168.0.4/16",
"IPv6Address": ""
}
},
"Options": {},
"Labels": {}
}
]
# Now tomcat03 can reach both tomcat01 and tomcat02
[root@porty ~]# docker exec -it tomcat03 ping tomcat01
PING tomcat01 (192.168.0.2) 56(84) bytes of data.
64 bytes from tomcat01.mynet (192.168.0.2): icmp_seq=1 ttl=64 time=0.079 ms
^C
--- tomcat01 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.079/0.079/0.079/0.000 ms
[root@porty ~]# docker exec -it tomcat03 ping tomcat02
PING tomcat02 (192.168.0.3) 56(84) bytes of data.
64 bytes from tomcat02.mynet (192.168.0.3): icmp_seq=1 ttl=64 time=0.065 ms
^C
--- tomcat02 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.065/0.065/0.065/0.000 ms
[root@porty ~]# docker exec -it tomcat01 ping tomcat03
PING tomcat03 (192.168.0.4) 56(84) bytes of data.
64 bytes from tomcat03.mynet (192.168.0.4): icmp_seq=1 ttl=64 time=0.038 ms
64 bytes from tomcat03.mynet (192.168.0.4): icmp_seq=2 ttl=64 time=0.069 ms
^C
--- tomcat03 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1ms
rtt min/avg/max/mdev = 0.038/0.053/0.069/0.017 ms
[root@porty ~]# docker exec -it tomcat02 ping tomcat03
PING tomcat03 (192.168.0.4) 56(84) bytes of data.
64 bytes from tomcat03.mynet (192.168.0.4): icmp_seq=1 ttl=64 time=0.037 ms
64 bytes from tomcat03.mynet (192.168.0.4): icmp_seq=2 ttl=64 time=0.073 ms
^C
--- tomcat03 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1000ms
rtt min/avg/max/mdev = 0.037/0.055/0.073/0.018 ms
Hands-on: Deploying a Redis Cluster
# Create a network
docker network create --driver bridge --subnet 172.38.0.0/16 redis
# Generate six redis configs with a script
for port in $(seq 1 6); \
do \
mkdir -p /mydata/redis/node-${port}/conf
touch /mydata/redis/node-${port}/conf/redis.conf
cat << EOF >/mydata/redis/node-${port}/conf/redis.conf
port 6379
bind 0.0.0.0
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
cluster-announce-ip 172.38.0.1${port}
cluster-announce-port 6379
cluster-announce-bus-port 16379
appendonly yes
EOF
done
# Start the 6 nodes (template: substitute ${port} with 1-6; the expanded commands are listed below)
docker run -p 637${port}:6379 -p 1637${port}:16379 --name redis-${port} \
-v /mydata/redis/node-${port}/data:/data \
-v /mydata/redis/node-${port}/conf/redis.conf:/etc/redis/redis.conf \
-d --net redis --ip 172.38.0.1${port} redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf
# Expanded commands for nodes 1-6
docker run -p 6371:6379 -p 16371:16379 --name redis-1 \
-v /mydata/redis/node-1/data:/data \
-v /mydata/redis/node-1/conf/redis.conf:/etc/redis/redis.conf \
-d --net redis --ip 172.38.0.11 redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf
docker run -p 6372:6379 -p 16372:16379 --name redis-2 \
-v /mydata/redis/node-2/data:/data \
-v /mydata/redis/node-2/conf/redis.conf:/etc/redis/redis.conf \
-d --net redis --ip 172.38.0.12 redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf
docker run -p 6373:6379 -p 16373:16379 --name redis-3 \
-v /mydata/redis/node-3/data:/data \
-v /mydata/redis/node-3/conf/redis.conf:/etc/redis/redis.conf \
-d --net redis --ip 172.38.0.13 redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf
docker run -p 6374:6379 -p 16374:16379 --name redis-4 \
-v /mydata/redis/node-4/data:/data \
-v /mydata/redis/node-4/conf/redis.conf:/etc/redis/redis.conf \
-d --net redis --ip 172.38.0.14 redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf
docker run -p 6375:6379 -p 16375:16379 --name redis-5 \
-v /mydata/redis/node-5/data:/data \
-v /mydata/redis/node-5/conf/redis.conf:/etc/redis/redis.conf \
-d --net redis --ip 172.38.0.15 redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf
docker run -p 6376:6379 -p 16376:16379 --name redis-6 \
-v /mydata/redis/node-6/data:/data \
-v /mydata/redis/node-6/conf/redis.conf:/etc/redis/redis.conf \
-d --net redis --ip 172.38.0.16 redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf
# After all 6 containers are up, check their status
[root@porty bin]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
bd0fbc00bc9e redis:5.0.9-alpine3.11 "docker-entrypoint.s…" 5 seconds ago Up 4 seconds 0.0.0.0:6376->6379/tcp, 0.0.0.0:16376->16379/tcp redis-6
9e48e90d0efe redis:5.0.9-alpine3.11 "docker-entrypoint.s…" 9 seconds ago Up 8 seconds 0.0.0.0:6375->6379/tcp, 0.0.0.0:16375->16379/tcp redis-5
0f31a695cd6f redis:5.0.9-alpine3.11 "docker-entrypoint.s…" 13 seconds ago Up 13 seconds 0.0.0.0:6374->6379/tcp, 0.0.0.0:16374->16379/tcp redis-4
bf06bf4a355c redis:5.0.9-alpine3.11 "docker-entrypoint.s…" About a minute ago Up About a minute 0.0.0.0:6373->6379/tcp, 0.0.0.0:16373->16379/tcp redis-3
735fb1b13b2e redis:5.0.9-alpine3.11 "docker-entrypoint.s…" 2 minutes ago Up 2 minutes 0.0.0.0:6372->6379/tcp, 0.0.0.0:16372->16379/tcp redis-2
4f9620408053 redis:5.0.9-alpine3.11 "docker-entrypoint.s…" 5 minutes ago Up 5 minutes 0.0.0.0:6371->6379/tcp, 0.0.0.0:16371->16379/tcp redis-1
# Enter node 1 and create the cluster
[root@porty bin]# docker exec -it redis-1 /bin/sh
/data # ls
appendonly.aof nodes.conf
/data # redis-cli --cluster create 172.38.0.11:6379 172.38.0.12:6379 172.38.0.13:6379 172.38.0.14:6379 172.38.0.15:6379 172.38.0.16:6379 --cluster-replicas 1
>>> Performing hash slots allocation on 6 nodes...
Master[0] -> Slots 0 - 5460
Master[1] -> Slots 5461 - 10922
Master[2] -> Slots 10923 - 16383
Adding replica 172.38.0.15:6379 to 172.38.0.11:6379
Adding replica 172.38.0.16:6379 to 172.38.0.12:6379
Adding replica 172.38.0.14:6379 to 172.38.0.13:6379
M: fe8cf8086dce179b6b08dc8a68ea525a514063f1 172.38.0.11:6379
slots:[0-5460] (5461 slots) master
M: d4d751b5152cbfdc1ce2e99d8ac88ac35c27a3df 172.38.0.12:6379
slots:[5461-10922] (5462 slots) master
M: 727d2f5245803ce83feaf03d1a02c6ce3ac9e6fd 172.38.0.13:6379
slots:[10923-16383] (5461 slots) master
S: de9e8762b1f53a0a474eb2515ada8e21b31737eb 172.38.0.14:6379
replicates 727d2f5245803ce83feaf03d1a02c6ce3ac9e6fd
S: a80bfaea952b35794ca7cd2f621658b59e23aa54 172.38.0.15:6379
replicates fe8cf8086dce179b6b08dc8a68ea525a514063f1
S: ff5332e913729309e00148650952af8197dcaaaa 172.38.0.16:6379
replicates d4d751b5152cbfdc1ce2e99d8ac88ac35c27a3df
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join
...
>>> Performing Cluster Check (using node 172.38.0.11:6379)
M: fe8cf8086dce179b6b08dc8a68ea525a514063f1 172.38.0.11:6379
slots:[0-5460] (5461 slots) master
1 additional replica(s)
M: 727d2f5245803ce83feaf03d1a02c6ce3ac9e6fd 172.38.0.13:6379
slots:[10923-16383] (5461 slots) master
1 additional replica(s)
S: a80bfaea952b35794ca7cd2f621658b59e23aa54 172.38.0.15:6379
slots: (0 slots) slave
replicates fe8cf8086dce179b6b08dc8a68ea525a514063f1
M: d4d751b5152cbfdc1ce2e99d8ac88ac35c27a3df 172.38.0.12:6379
slots:[5461-10922] (5462 slots) master
1 additional replica(s)
S: ff5332e913729309e00148650952af8197dcaaaa 172.38.0.16:6379
slots: (0 slots) slave
replicates d4d751b5152cbfdc1ce2e99d8ac88ac35c27a3df
S: de9e8762b1f53a0a474eb2515ada8e21b31737eb 172.38.0.14:6379
slots: (0 slots) slave
replicates 727d2f5245803ce83feaf03d1a02c6ce3ac9e6fd
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
# Connect to redis in cluster mode and inspect it
/data # redis-cli -c
# Check the cluster info
127.0.0.1:6379> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:1
cluster_stats_messages_ping_sent:293
cluster_stats_messages_pong_sent:314
cluster_stats_messages_sent:607
cluster_stats_messages_ping_received:309
cluster_stats_messages_pong_received:293
cluster_stats_messages_meet_received:5
cluster_stats_messages_received:607
# Check the node info: three masters, three replicas
127.0.0.1:6379> cluster nodes
727d2f5245803ce83feaf03d1a02c6ce3ac9e6fd 172.38.0.13:6379@16379 master - 0 1608777454538 3 connected 10923-16383
a80bfaea952b35794ca7cd2f621658b59e23aa54 172.38.0.15:6379@16379 slave fe8cf8086dce179b6b08dc8a68ea525a514063f1 0 1608777454639 5 connected
d4d751b5152cbfdc1ce2e99d8ac88ac35c27a3df 172.38.0.12:6379@16379 master - 0 1608777454000 2 connected 5461-10922
ff5332e913729309e00148650952af8197dcaaaa 172.38.0.16:6379@16379 slave d4d751b5152cbfdc1ce2e99d8ac88ac35c27a3df 0 1608777454538 6 connected
de9e8762b1f53a0a474eb2515ada8e21b31737eb 172.38.0.14:6379@16379 slave 727d2f5245803ce83feaf03d1a02c6ce3ac9e6fd 0 1608777455641 4 connected
fe8cf8086dce179b6b08dc8a68ea525a514063f1 172.38.0.11:6379@16379 myself,master - 0 1608777454000 1 connected 0-5460
# Set a key-value pair
127.0.0.1:6379> set name porty
-> Redirected to slot [5798] located at 172.38.0.12:6379 # handled by node 2
OK
172.38.0.12:6379> keys *
1) "name"
172.38.0.12:6379> set age 18
-> Redirected to slot [741] located at 172.38.0.11:6379 # handled by node 1
OK
# Test: stop the redis-2 container, then check the status of its replica redis-6
cluster nodes
727d2f5245803ce83feaf03d1a02c6ce3ac9e6fd 172.38.0.13:6379@16379 master - 0 1608778032042 3 connected 10923-16383
a80bfaea952b35794ca7cd2f621658b59e23aa54 172.38.0.15:6379@16379 slave fe8cf8086dce179b6b08dc8a68ea525a514063f1 0 1608778032000 5 connected
d4d751b5152cbfdc1ce2e99d8ac88ac35c27a3df 172.38.0.12:6379@16379 master,fail - 1608778002575 1608778001000 2 connected # fail: after the redis-2 container stopped, this node is marked as failed
ff5332e913729309e00148650952af8197dcaaaa 172.38.0.16:6379@16379 master - 0 1608778033045 7 connected 5461-10922 # the redis-6 container's node has been promoted to master
de9e8762b1f53a0a474eb2515ada8e21b31737eb 172.38.0.14:6379@16379 slave 727d2f5245803ce83feaf03d1a02c6ce3ac9e6fd 0 1608778034047 4 connected
fe8cf8086dce179b6b08dc8a68ea525a514063f1 172.38.0.11:6379@16379 myself,master - 0 1608778033000 1 connected 0-5460
# Fetch the key-value pair that was originally stored via redis-2: name = porty
get name
-> Redirected to slot [5798] located at 172.38.0.16:6379 # now served by the redis-6 container (formerly redis-2's replica)
"porty"
Packaging a SpringBoot Microservice as a Docker Image
1. Create a SpringBoot project
2. Package the application as a jar
3. Write the Dockerfile (see the sketch below)
4. Build the image
5. Publish and run it
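The Dockerfile used here is not reproduced in the notes; the sketch below is reconstructed from the build output that follows:
FROM java:8
COPY *.jar /app.jar
CMD ["--server.port=8080"]
EXPOSE 8080
ENTRYPOINT ["java","-jar","/app.jar"]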
docker build -t porty666 .
Sending build context to Docker daemon 17MB
Step 1/5 : FROM java:8
8: Pulling from library/java
5040bd298390: Pull complete
fce5728aad85: Pull complete
76610ec20bf5: Pull complete
60170fec2151: Pull complete
e98f73de8f0d: Pull complete
11f7af24ed9c: Pull complete
49e2d6393f32: Pull complete
bb9cdec9c7f3: Pull complete
Digest: sha256:c1ff613e8ba25833d2e1940da0940c3824f03f802c449f3d1815a66b7f8c0e9d
Status: Downloaded newer image for java:8
---> d23bdf5b1b1b
Step 2/5 : COPY *.jar /app.jar
---> 98268a327367
Step 3/5 : CMD ["--server.port=8080"]
---> Running in a3e3ea023c9a
Removing intermediate container a3e3ea023c9a
---> 2575d4eb31f1
Step 4/5 : EXPOSE 8080
---> Running in 720dae9c3328
Removing intermediate container 720dae9c3328
---> 75f74bac0b74
Step 5/5 : ENTRYPOINT ["java","-jar","/app.jar"]
---> Running in 66940a1d8b04
Removing intermediate container 66940a1d8b04
---> ba3ae7ddc4de
Successfully built ba3ae7ddc4de
Successfully tagged porty666:latest
[root@porty idea]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
[root@porty idea]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
porty666 latest ba3ae7ddc4de 2 minutes ago 660MB
mysql 5.7 f07dfa83b528 2 days ago 448MB
tomcat latest feba8d001e3f 5 days ago 649MB
redis 5.0.9-alpine3.11 3661c84ee9d0 8 months ago 29.8MB
java 8 d23bdf5b1b1b 3 years ago 643MB
[root@porty idea]# docker run -d -P --name my-project porty666
1469490b8f817ac070f38baeaf270610763e54ed6337f8854abc03a44702d65f
[root@porty idea]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
1469490b8f81 porty666 "java -jar /app.jar …" 2 seconds ago Up 1 second 0.0.0.0:49168->8080/tcp my-project
[root@porty idea]# curl localhost:49168
{"timestamp":"2020-12-24T03:12:48.547+00:00","status":404,"error":"Not Found","message":"","path":"/"}[root@porty idea]# ^C
[root@porty idea]# curl localhost:49168/hello
hello porty![root@porty idea]#
These notes were compiled from the Docker course by 狂神说Java (KuangShenShuo Java).