Docker 学习


安装 docker

环境查看

# 系统内核
[root@Master ~]# uname -r
3.10.0-1127.el7.x86_64

# 系统版本
[root@Master ~]# cat /etc/os-release
NAME="CentOS Linux"
VERSION="7 (Core)"
ID="centos"
ID_LIKE="rhel fedora"
VERSION_ID="7"
PRETTY_NAME="CentOS Linux 7 (Core)"
ANSI_COLOR="0;31"
CPE_NAME="cpe:/o:centos:centos:7"
HOME_URL="https://www.centos.org/"
BUG_REPORT_URL="https://bugs.centos.org/"

CENTOS_MANTISBT_PROJECT="CentOS-7"
CENTOS_MANTISBT_PROJECT_VERSION="7"
REDHAT_SUPPORT_PRODUCT="centos"
REDHAT_SUPPORT_PRODUCT_VERSION="7"

安装

# 卸载旧版本的 所有docker 信息
sudo yum remove docker \
docker-client \
docker-client-latest \
docker-common \
docker-latest \
docker-latest-logrotate \
docker-logrotate \
docker-engine

# 安装 需要的 包
yum install -y yum-utils

# 设置镜像的仓库 (使用阿里云镜像地址)
yum-config-manager \
--add-repo \
http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

# 更新 yum 软件 包索引
yum makecache fast

# 安装 docker 相关 docker-ce(社区版)
yum install docker-ce docker-ce-cli containerd.io

启动 docker服务

# 启动
systemctl start docker

#配置开机自动启动
systemctl enable docker

更新Docker

# 卸载以前的版本
$ yum remove docker docker-common docker-selinux docker-engine

# 查看所有仓库中所有docker版本,并选择特定版本安装
$ yum list docker-ce --showduplicates | sort -r

# 安装 docker-ce 版本
$ yum install -y docker-ce

# 查看安装后的版本
$ docker version

docker 常用的命令

帮助命令

docker version 		#显示docker的版本信息
docker info #显示docker的系统信息,包括镜像和容器的数量
docker 命令 --help #帮助命令

镜像命令

docker images		#查看所有镜像

# 保存镜像到本地(-o 指定输出的 tar 文件)
docker save -o 文件名.tar 镜像名

# 加载本地镜像
docker load < 文件名.tar

#eg
docker load < images.tar # images.tar即镜像名称

容器命令

docker pull centos		# 有了镜像才能创建容器,先下载一个 centos 镜像用于测试

新建容器并启动

docker run [可选参数] image

# 参数说明
--name="Name" 容器名称 tomcat01 tomcat02 用来区分容器
-d 后台方式运行
-it 使用交互方式运行,进入容器查看内容
-p 指定容器的端口,如 -p 8080:8080,有以下几种写法
	-p ip:主机端口:容器端口
	-p 主机端口:容器端口(常用)
	-p 容器端口
-P 随机指定端口

# 测试 启动并进入容器
[root@Master ~]# docker run -it centos /bin/bash
[root@200da2a4ffb9 /]# ls # 查看容器内的CentOS 基础版本,很多命令都不是完善的

#从容器中退回主机
[root@200da2a4ffb9 /]# exit
exit

列出所有运行的容器

# docker ps 命令
# 列出当前正在运行的容器
-a # 列出当前正在运行的容器 + 历史运行过的容器
-n=? # 显示最近创建的 n 个容器
-q # 只显示容器的编号
[root@Master ~]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
[root@Master ~]# docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
200da2a4ffb9 centos "/bin/bash" 3 minutes ago Exited (0) About a minute ago great_hellman
e6c269d9a936 hello-world "/hello" 4 hours ago Exited (0) 4 hours ago eloquent_brahmagupta
[root@Master ~]#

退出容器

exit		# 容器停止并退出
Ctrl+P+Q # 容器不停止退出

删除容器

docker rm 容器id				# 删除指定的容器,不能删除正在运行的容器,如需强制删除用 docker rm -f
docker rm -f $(docker ps -aq)	# 删除所有的容器
docker ps -a -q|xargs docker rm # 删除所有的容器

启动和停止容器的操作

docker start 容器id			# 启动容器
docker restart 容器id # 重启容器
docker stop 容器id # 停止正在运行的容器
docker kill 容器id # 强制关掉当前容器

常用的其他命令

后台启动容器

# 命令 docker run -d 镜像名!
[root@Master ~]# docker run -d centos

# 问题:docker ps 发现刚启动的 centos 容器停止了

# 常见的坑:docker 容器使用后台运行,就必须要有一个前台进程,docker 发现没有应用了,就会自动停止
# 例如 nginx 容器启动后,如果发现自己没有提供前台服务,就会立刻停止

查看日志
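查看日志常用 docker logs 命令,下面是一个简单示例(容器 id、输出内容仅为示意):

# 先后台跑一个会持续输出的容器
docker run -d centos /bin/sh -c "while true;do echo helloworld;sleep 1;done"

# 查看日志:-t 显示时间戳,-f 持续跟踪输出,--tail 指定显示的行数
docker logs -tf --tail 10 容器id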

docker 安装 nginx

# 1.搜索镜像 search 
# 2.下载镜像 pull
# 3.运行测试
[root@Master ~]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
centos latest 831691599b88 11 days ago 215MB
nginx latest 2622e6cca7eb 2 weeks ago 132MB
hello-world latest bf756fb1ae65 5 months ago 13.3kB
# -d 后台运行
# --name 容器名字
# -p 宿主机端口:容器内部端口
[root@Master ~]# docker run -d --name nginx01 -p:8631:80 nginx
7fb7deced85d0f40f4279f8090fba43cc1ed281ecf3ba77962f7696c1f8d2a5f
[root@Master ~]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
7fb7deced85d nginx "/docker-entrypoint.…" 6 seconds ago Up 4 seconds 0.0.0.0:8631->80/tcp nginx01
[root@Master ~]# curl 127.0.0.1:8631

# 进入容器
# 查看容器信息
[root@Master ~]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
7fb7deced85d nginx "/docker-entrypoint.…" 8 minutes ago Up 7 minutes 0.0.0.0:8631->80/tcp nginx01
# 进入容器
[root@Master ~]# docker exec -it nginx01 /bin/bash
root@7fb7deced85d:/# ls
bin boot dev docker-entrypoint.d docker-entrypoint.sh etc home lib lib64 media mnt opt proc root run sbin srv sys tmp usr var
root@7fb7deced85d:/# whereis nginx
nginx: /usr/sbin/nginx /usr/lib/nginx /etc/nginx /usr/share/nginx
root@7fb7deced85d:/# cd /etc/nginx/
root@7fb7deced85d:/etc/nginx# ls
conf.d fastcgi_params koi-utf koi-win mime.types modules nginx.conf scgi_params uwsgi_params win-utf
root@7fb7deced85d:/etc/nginx#

docker 安装 tomcat

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
# 官方使用的方法
docker run -it --rm tomcat:9.0
# 之前启动的都是后台,在容器停止后,还能查看,docker run -it --rm 一般用来测试,用完即删除

# 下载再启动
docker pull tomcat

# 启动运行
docker run -d -p 8632:8080 --name tomcat01 tomcat

# 测试访问
curl localhost:8632

# 进入容器
[root@Master ~]# docker exec -it tomcat01 /bin/bash

# 发现问题:1. Linux 命令少了;2. 没有 webapps。阿里云镜像的原因:默认是最小镜像,所以不必要的都剔除掉了
# 保证最小可运行的环境
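# 一个可行的处理方式(官方镜像里的 webapps.dist 目录保留了默认应用,目录名以实际镜像为准):
# 进入容器后(默认工作目录即 tomcat 安装目录)把 webapps.dist 的内容拷贝到 webapps,再访问就有默认页面了
cp -r webapps.dist/* webapps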

docker 部署 es + kibana

# es 暴露的端口很多
# es 十分的耗内存
# es 的数据一般放置到安全目录 挂载
# --net somenetwork 网络配置

# 启动 elasticsearch
docker run -d --name elasticsearch -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" elasticsearch:7.6.2

# 得到的信息
[root@Master ~]# curl 127.0.0.1:9200
{
"name" : "4829da344e6f",
"cluster_name" : "docker-cluster",
"cluster_uuid" : "e5T4EB9NR1eVHcJcj4qfgQ",
"version" : {
"number" : "7.6.2",
"build_flavor" : "default",
"build_type" : "docker",
"build_hash" : "ef48eb35cf30adf4db14086e8aabd07ef6fb113f",
"build_date" : "2020-03-26T06:34:37.794943Z",
"build_snapshot" : false,
"lucene_version" : "8.4.0",
"minimum_wire_compatibility_version" : "6.8.0",
"minimum_index_compatibility_version" : "6.0.0-beta1"
},
"tagline" : "You Know, for Search"
}


# 启动了之后 Linux 就很卡,用 docker stats 查看容器的 CPU、内存占用状态
CONTAINER ID NAME CPU % MEM USAGE / LIMIT MEM % NET I/O BLOCK I/O PIDS
4829da344e6f elasticsearch 0.68% 559.9MiB / 972.3MiB 57.58% 656B / 0B 0B / 0B 41
7fb7deced85d nginx01 0.00% 236KiB / 972.3MiB 0.02% 3.5kB / 3.93kB 0B / 0B 2

# 给 elasticsearch 添加内存限制,通过 -e ES_JAVA_OPTS 修改 JVM 的内存参数
docker run -d --name elasticsearch -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" -e ES_JAVA_OPTS="-Xms64m -Xmx512m" elasticsearch:7.6.2
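# 注意:容器名 elasticsearch 已被上面的容器占用,重新启动前需要先删掉旧容器(仅为示意)
docker rm -f elasticsearch
# 重新执行上面带 ES_JAVA_OPTS 的启动命令后,再用 docker stats 和 curl 127.0.0.1:9200 验证内存占用和服务是否正常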

commit 镜像

docker commit 提交容器成为一个新的镜像副本

docker commit -m="提交的描述信息" -a="作者" 容器id 目标镜像名:[TAG]

# 测试
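# 一个示意流程(以上面的 tomcat01 容器为例,镜像名和 TAG 仅为示例):
# 1. 进入容器做一些修改,例如把 webapps.dist 拷贝到 webapps
# 2. 把修改后的容器提交为一个新镜像
docker commit -m="add default webapps" -a="jiang" tomcat01 mytomcat:1.0
# 3. docker images 查看,可以看到自己提交的新镜像
docker images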

容器数据卷

什么是容器数据卷

容器之间可以有一个数据共享的技术,Docker容器产生的数据,同步到本地

这就是卷技术! 目录的挂载,将我们容器内的目录,挂载到Linux上面

总结:卷技术实现了容器的持久化和同步操作,容器之间也可以进行数据共享

使用数据卷

方式一:直接使用命令来挂载 -v

docker run -it -v 主机目录:容器内目录 -p 主机端口:容器端口 镜像名

# 测试
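# 根据下面 docker inspect 输出中的 Binds(/home/cs:/home)推断,这里使用的测试命令大致是:
docker run -it -v /home/cs:/home centos /bin/bash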

# 启动起来之后,通过 docker inspect 容器id 查看挂载信息

# 得到如下信息:
[root@Master cs]# docker inspect e26acffc1605
[
{
"Id": "e26acffc1605dd32accfab624786e6a14cb7a190b97465125640515fbad256bf",
"Created": "2020-06-29T07:24:13.738029974Z",
"Path": "/bin/bash",
"Args": [],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 3064,
"ExitCode": 0,
"Error": "",
"StartedAt": "2020-06-29T07:24:14.37715389Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:831691599b88ad6cc2a4abbd0e89661a121aff14cfa289ad840fd3946f274f1f",
"ResolvConfPath": "/var/lib/docker/containers/e26acffc1605dd32accfab624786e6a14cb7a190b97465125640515fbad256bf/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/e26acffc1605dd32accfab624786e6a14cb7a190b97465125640515fbad256bf/hostname",
"HostsPath": "/var/lib/docker/containers/e26acffc1605dd32accfab624786e6a14cb7a190b97465125640515fbad256bf/hosts",
"LogPath": "/var/lib/docker/containers/e26acffc1605dd32accfab624786e6a14cb7a190b97465125640515fbad256bf/e26acffc1605dd32accfab624786e6a14cb7a190b97465125640515fbad256bf-json.log",
"Name": "/romantic_fermat",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/home/cs:/home"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "default",
"PortBindings": {},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"CapAdd": null,
"CapDrop": null,
"Capabilities": null,
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": false,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": null,
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"ConsoleSize": [
0,
0
],
"Isolation": "",
"CpuShares": 0,
"Memory": 0,
"NanoCpus": 0,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": null,
"BlkioDeviceWriteBps": null,
"BlkioDeviceReadIOps": null,
"BlkioDeviceWriteIOps": null,
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"KernelMemory": 0,
"KernelMemoryTCP": 0,
"MemoryReservation": 0,
"MemorySwap": 0,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": null,
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": [
"/proc/asound",
"/proc/acpi",
"/proc/kcore",
"/proc/keys",
"/proc/latency_stats",
"/proc/timer_list",
"/proc/timer_stats",
"/proc/sched_debug",
"/proc/scsi",
"/sys/firmware"
],
"ReadonlyPaths": [
"/proc/bus",
"/proc/fs",
"/proc/irq",
"/proc/sys",
"/proc/sysrq-trigger"
]
},
"GraphDriver": {
"Data": {
"LowerDir": "/var/lib/docker/overlay2/577daf27c1bf419becf4a5ea1f20613af48eb5c6bd2a0b08ec4414597e2665cc-init/diff:/var/lib/docker/overlay2/56594f75bb4f0cc2cfba924950f681cab8bcaa9eda0bca55fb6b9a682b024bcf/diff",
"MergedDir": "/var/lib/docker/overlay2/577daf27c1bf419becf4a5ea1f20613af48eb5c6bd2a0b08ec4414597e2665cc/merged",
"UpperDir": "/var/lib/docker/overlay2/577daf27c1bf419becf4a5ea1f20613af48eb5c6bd2a0b08ec4414597e2665cc/diff",
"WorkDir": "/var/lib/docker/overlay2/577daf27c1bf419becf4a5ea1f20613af48eb5c6bd2a0b08ec4414597e2665cc/work"
},
"Name": "overlay2"
},
"Mounts": [ # 挂载 -v 卷
{
"Type": "bind",
"Source": "/home/cs", # 主机内地址
"Destination": "/home", # docker容器内地址
"Mode": "",
"RW": true,
"Propagation": "rprivate"
}
],
"Config": {
"Hostname": "e26acffc1605",
"Domainname": "",
"User": "",
"AttachStdin": true,
"AttachStdout": true,
"AttachStderr": true,
"Tty": true,
"OpenStdin": true,
"StdinOnce": true,
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": [
"/bin/bash"
],
"Image": "centos",
"Volumes": null,
"WorkingDir": "",
"Entrypoint": null,
"OnBuild": null,
"Labels": {
"org.label-schema.build-date": "20200611",
"org.label-schema.license": "GPLv2",
"org.label-schema.name": "CentOS Base Image",
"org.label-schema.schema-version": "1.0",
"org.label-schema.vendor": "CentOS"
}
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "0b51b73d479896bb96eee9690c74e2d7853ab871e86110c8d1ed0669620b495c",
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"Ports": {},
"SandboxKey": "/var/run/docker/netns/0b51b73d4798",
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "c0f376d795cbe26d14344a711f46cdb1d593d3155c6c073009ac9bfd3385a585",
"Gateway": "172.17.0.1",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "172.17.0.3",
"IPPrefixLen": 16,
"IPv6Gateway": "",
"MacAddress": "02:42:ac:11:00:03",
"Networks": {
"bridge": {
"IPAMConfig": null,
"Links": null,
"Aliases": null,
"NetworkID": "bb94e6ee613789da87156eaf970163e0bf98cb5bb7c0093d842a1c878ba6dbc0",
"EndpointID": "c0f376d795cbe26d14344a711f46cdb1d593d3155c6c073009ac9bfd3385a585",
"Gateway": "172.17.0.1",
"IPAddress": "172.17.0.3",
"IPPrefixLen": 16,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"MacAddress": "02:42:ac:11:00:03",
"DriverOpts": null
}
}
}
}
]

好处:只需要修改宿主机上面的配置,容器内会自动同步
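可以简单验证一下同步效果(沿用上面的 /home/cs:/home 挂载,文件名仅为示意):

# 在宿主机的挂载目录下新建文件
echo "hello volume" > /home/cs/test.txt
# 在容器内查看,可以看到同步过来的文件
docker exec -it e26acffc1605 cat /home/test.txt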

实战 : 安装 mysql

# 获取镜像
[root@Master ~]# docker pull mysql:5.7

# 运行容器时 需要做数据挂载
# 官方测试 docker run --name some-mysql -e MYSQL_ROOT_PASSWORD=my-secret-pw -d mysql:tag

# 启动
-d 后台启动
-p 端口映射
-v 挂载卷
-e 环境配置
--name 容器名称
[root@Master ~]# docker run -d -p 3310:3306 -v /home/mysql/conf:/etc/mysql/conf.d -v /home/mysql/data:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=root --name mysql01 mysql:5.7
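# 启动后可以简单验证一下(root 密码即上面设置的 root,命令仅为示意)
docker exec -it mysql01 mysql -uroot -proot -e "show databases;"
# 也可以在宿主机用客户端连接 3310 端口,或查看 /home/mysql/data 目录是否生成了数据文件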

具名和匿名挂载

# 匿名挂载
-v 容器内路径
docker run -d -P --name nginx01 -v /etc/nginx nginx

# 查看所有 volume 的情况
[root@Master ~]# docker volume ls
DRIVER VOLUME NAME
local 96adbd945bbe006aef607564f8d29cb207342ebde96b170b1e790e4acf670dd2


# 具名挂载
[root@Master ~]# docker run -d -P --name nginx02 -v juming-nginx:/etc/nginx nginx
f090a93167a519513f58755e60261b1bcbf1e17832a00f28f8908994484b96e1
[root@Master ~]# docker volume ls
DRIVER VOLUME NAME
local 96adbd945bbe006aef607564f8d29cb207342ebde96b170b1e790e4acf670dd2
local juming-nginx

# 通过 -v 卷名:容器内路径
# 查看 卷路径
[root@Master ~]# docker volume inspect juming-nginx
[
{
"CreatedAt": "2020-06-29T16:41:11+08:00",
"Driver": "local",
"Labels": null,
"Mountpoint": "/var/lib/docker/volumes/juming-nginx/_data",
"Name": "juming-nginx",
"Options": null,
"Scope": "local"
}
]


# 所有 docker 容器内的卷,在没有指定路径的情况下 都是在 /var/lib/docker/volumes/xxxx/_data (大多数情况下使用 具名挂载)

# 如何确定具名挂载还是匿名挂载,还是指定路径挂载
-v 容器内路径 # 匿名挂载
-v 卷名:容器内路径 # 具名挂载
-v /宿主机路径:容器内路径 # 指定路径挂载

拓展

# 通过 -v 容器内路径:ro 或 :rw 改变读写权限
ro readonly # 只读
rw readwrite # 可读可写

# 设置了容器权限 容器对挂载出来的内容就有限定了
docker run -d -P --name nginx02 -v juming-nginx:/etc/nginx:ro nginx

docker run -d -P --name nginx02 -v juming-nginx:/etc/nginx:rw nginx

# ro 表示 路径只能通过宿主机来操作,容器内无法操作
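# 简单验证(仅为示意):ro 挂载时,在容器内向该目录写入会失败
docker exec -it nginx02 touch /etc/nginx/test.conf
# 预期会提示类似 Read-only file system 的错误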

DockerFile 就是用来构建 docker 镜像的构建文件,本质上是一个命令脚本

通过这个脚本可以生成镜像。镜像是一层一层的,脚本是一个个的命令,每个命令构建一层

# 创建一个 dockerfile 文件,名称可以随意,建议用 Dockerfile
# 文件中的内容:指令(大写) + 参数

FROM centos
VOLUME ["volume01","volume02"]

CMD echo "------end----"

CMD /bin/bash
# 这里的每个命令 就是镜像的一层

# 执行 文件
[root@Master docker-test-volume]# docker build -f dockerfile1 -t jiang/centos .
Sending build context to Docker daemon 2.048kB
Step 1/4 : FROM centos
---> 831691599b88
Step 2/4 : VOLUME ["volume01","volume02"]
---> Running in 8c8ae28b5b73
Removing intermediate container 8c8ae28b5b73
---> 541d3bdb8d37
Step 3/4 : CMD echo "------end----"
---> Running in 4a7199a7ce6c
Removing intermediate container 4a7199a7ce6c
---> 2f6114c16160
Step 4/4 : CMD /bin/bash
---> Running in 2e8164276601
Removing intermediate container 2e8164276601
---> 89c3e5a2dda3
Successfully built 89c3e5a2dda3
Successfully tagged jiang/centos:latest

启动自己写的容器

[root@Master docker-test-volume]# docker run -it 89c3e5a2dda3 /bin/bash

# 查看文件信息
[root@8bdefa71c931 /]# ls -l

(图:容器内 ls -l 可以看到自动挂载的 volume01、volume02 两个数据卷目录)

同时这个卷和外部一定有一个同步的目录

查看挂载卷的路径

[root@Master ~]# docker inspect 8bdefa71c931

(图:docker inspect 输出的 Mounts 信息中可以看到这两个卷在宿主机上对应的路径)

这种方式使用得十分多。

如果构建镜像的时候没有挂载卷,就需要手动挂载:-v 卷名:容器内路径,示例见下。
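一个示意命令(卷名仅为示例):

docker run -it -v juming-volume:/volume01 jiang/centos /bin/bash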

数据卷容器

多个容器同步数据 !

# 启动 3 个容器,通过自己构建的镜像启动

# 容器1
docker run -it --name docker01 jiang/centos

# 容器2
docker run -it --name docker02 --volumes-from docker01 jiang/centos

# 容器3
docker run -it --name docker03 --volumes-from docker01 jiang/centos

# 当 docker01 被删除,docker02 和 docker03 上面的数据依然存在(数据卷是共享的,并不是单向拷贝)

多个mysql同步数据 !

docker run -d -p 3310:3306 -v /etc/mysql/conf.d -v /var/lib/mysql -e MYSQL_ROOT_PASSWORD=root --name mysql01 mysql:5.7

# 注意宿主机端口不能和 mysql01 冲突,这里换成 3311
docker run -d -p 3311:3306 -e MYSQL_ROOT_PASSWORD=root --name mysql02 --volumes-from mysql01 mysql:5.7

结论:

容器之间配置信息的传递,数据卷容器的生命周期一直持续到没有容器使用为止

但是一旦持久化到了本地,本地的数据不会随容器删除而删除
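可以做一个简单验证(文件名仅为示意):

# 在 docker01 的 volume01 中创建文件
docker exec -it docker01 touch /volume01/docker01.txt
# 在 docker02 中查看,可以看到同一个文件
docker exec -it docker02 ls -l /volume01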

DockerFile

DockerFile 介绍

DockerFile 就是用来构建docker镜像的构建文件 命令脚本

构建步骤:

1、编写一个dockerfile文件

2、docker build 构建成为一个镜像

3、docker run 运行镜像

4、docker push 发布镜像(DockerHub 、阿里云镜像仓库等)

# eg 
FROM scratch
ADD centos-7-x86_64-docker.tar.xz /

LABEL \
org.label-schema.schema-version="1.0" \
org.label-schema.name="CentOS Base Image" \
org.label-schema.vendor="CentOS" \
org.label-schema.license="GPLv2" \
org.label-schema.build-date="20200504" \
org.opencontainers.image.title="CentOS Base Image" \
org.opencontainers.image.vendor="CentOS" \
org.opencontainers.image.licenses="GPL-2.0-only" \
org.opencontainers.image.created="2020-05-04 00:00:00+01:00"

CMD ["/bin/bash"]

DockerFile构建过程

基础知识

1、每个保留关键字(指令)都必须是大写字母

2、执行从上到下顺序执行

3、# 表示注释

4、每个指令都会创建提交一个新的镜像层,并提交!

dockerfile 是面向开发的,做镜像需要编写dockerfile,这个文件十分简单

Docker镜像逐渐成为企业交付的标准,必须要掌握

步骤:开发、部署、运维等

dockerfile :构建文件,定义了一切的步骤,源代码

DockerImages:通过DockerFile构建生成的镜像,最终发布和运行产品

Docker 容器:容器就是镜像运行起来提供服务的

DockerFile的指令

FROM 		# 基础镜像 一切从这里开始构建
MAINTAINER # 镜像是谁写的 姓名+邮箱
RUN # 镜像构建的时候需要运行的命令
ADD # 添加内容,例如 tomcat 压缩包,添加到镜像中(压缩包会自动解压)
WORKDIR # 镜像的工作目录
VOLUME # 挂载卷的目录
EXPOSE # 暴露端口配置
CMD # 指定这个容器启动的时候要运行的命令,只有最后一个会生效,可被替代(覆盖)
ENTRYPOINT # 指定这个容器启动的时候要运行的命令,可以追加命令
ONBUILD # 当构建一个被继承的 DockerFile 时就会运行 ONBUILD 的指令,属于触发指令
COPY # 类似 ADD 将我们文件拷贝到镜像中
ENV # 构建的时候设置环境变量

实战测试

Docker Hub 中 99% 的镜像都是从基础镜像 FROM scratch 开始构建的,然后配置需要的软件和环境来构建出来的

创建一个自己的CentOS

# 1.编写DockerFile 文件
FROM centos
MAINTAINER jiang<1845124851@qq.com>

ENV MYPATH /user/local
WORKDIR $MYPATH

RUN yum -y install nano
RUN yum -y install net-tools

EXPOSE 80

CMD echo $MYPATH
CMD echo "---end---"
CMD /bin/bash

# 2.通过这个文件构建镜像
# 命令 docker build -f dockerfile文件路径 -t 镜像名:[tag]
# eg: docker build -f dockerfile -t mycentos:0.1 .
Successfully built 16c9df0ef832
Successfully tagged mycentos:0.1

# 3.测试运行
docker run -it mycentos:0.1

列出本地镜像的变更历史

# 命令 docker history 镜像id

# 测试
[root@Master dockerfile]# docker history 16c9df0ef832
IMAGE CREATED CREATED BY SIZE COMMENT
16c9df0ef832 22 minutes ago /bin/sh -c #(nop) CMD ["/bin/sh" "-c" "/bin… 0B
12cd5169633b 22 minutes ago /bin/sh -c #(nop) CMD ["/bin/sh" "-c" "echo… 0B
1ebcf4b7315d 22 minutes ago /bin/sh -c #(nop) CMD ["/bin/sh" "-c" "echo… 0B
ec576b555d70 22 minutes ago /bin/sh -c #(nop) EXPOSE 80 0B
a6cad6a23210 22 minutes ago /bin/sh -c yum -y install net-tools 22.1MB
1b20ba38a30c 22 minutes ago /bin/sh -c yum -y install nano 31.2MB
b7bc1b455416 23 minutes ago /bin/sh -c #(nop) WORKDIR /user/local 0B
34aa07ad44da 23 minutes ago /bin/sh -c #(nop) ENV MYPATH=/user/local 0B
114da438a7cc 24 minutes ago /bin/sh -c #(nop) MAINTAINER jiang<18451248… 0B
831691599b88 13 days ago /bin/sh -c #(nop) CMD ["/bin/bash"] 0B
<missing> 13 days ago /bin/sh -c #(nop) LABEL org.label-schema.sc… 0B
<missing> 13 days ago /bin/sh -c #(nop) ADD file:84700c11fcc969ac0… 215MB
[root@Master dockerfile]#

CMD 和 ENTRYPOINT 的区别

CMD			# 指定这个容器启动的时候要运行的命令,只有最后一个会生效,可被替代(覆盖)
ENTRYPOINT # 指定这个容器启动的时候要运行的命令,可以追加命令

测试 cmd

# 编写 dockerfile 文件
[root@Master dockerfile]# nano dockerfile-cmd-test
FROM centos
CMD ["ls","-a"]

# 构建镜像
[root@Master dockerfile]# docker build -f dockerfile-cmd-test -t cmdtest .

# 运行镜像
[root@Master dockerfile]# docker run 53a093170e89
.
..
.dockerenv
bin
dev
etc
home
lib
lib64
lost+found
media
mnt
opt
proc
root
run
sbin
srv
sys
tmp
usr
var

# 尝试追加一个参数 -l,期望得到 ls -al 的效果
[root@Master dockerfile]# docker run 53a093170e89 -l
docker: Error response from daemon: OCI runtime create failed: container_linux.go:349: starting container process caused "exec: \"-l\": executable file not found in $PATH": unknown.

# CMD 的情况下,-l 会替换掉 CMD ["ls","-a"] 整条命令,而 -l 本身不是一个可执行命令,所以报错
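# 如果要覆盖 CMD,需要写出完整的命令,例如:
docker run 53a093170e89 ls -al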

测试 ENTRYPOINT

# 创建脚本
[root@Master dockerfile]# nano dockerfile-cmd-entrypoint
FROM centos
ENTRYPOINT ["ls","-a"]

# 构建镜像
[root@Master dockerfile]# docker build -f dockerfile-cmd-entrypoint -t entrypoint-test .
Sending build context to Docker daemon 4.096kB
Step 1/2 : FROM centos
---> 831691599b88
Step 2/2 : ENTRYPOINT ["ls","-a"]
---> Running in de1d0dbfa1c7
Removing intermediate container de1d0dbfa1c7
---> 41b568c3a79a
Successfully built 41b568c3a79a
Successfully tagged entrypoint-test:latest

#运行镜像
[root@Master dockerfile]# docker run 41b568c3a79a
.
..
.dockerenv
bin
dev
etc
home
lib
lib64
lost+found
media
mnt
opt
proc
root
run
sbin
srv
sys
tmp
usr
var

# 运行镜像时追加的参数,会直接拼接到 ENTRYPOINT 指定的命令后面
[root@Master dockerfile]# docker run 41b568c3a79a -l
total 0
drwxr-xr-x. 1 root root 6 Jun 30 06:36 .
drwxr-xr-x. 1 root root 6 Jun 30 06:36 ..
-rwxr-xr-x. 1 root root 0 Jun 30 06:36 .dockerenv
lrwxrwxrwx. 1 root root 7 May 11 2019 bin -> usr/bin
drwxr-xr-x. 5 root root 340 Jun 30 06:36 dev
drwxr-xr-x. 1 root root 66 Jun 30 06:36 etc
drwxr-xr-x. 2 root root 6 May 11 2019 home
lrwxrwxrwx. 1 root root 7 May 11 2019 lib -> usr/lib
lrwxrwxrwx. 1 root root 9 May 11 2019 lib64 -> usr/lib64
drwx------. 2 root root 6 Jun 11 02:35 lost+found
drwxr-xr-x. 2 root root 6 May 11 2019 media
drwxr-xr-x. 2 root root 6 May 11 2019 mnt
drwxr-xr-x. 2 root root 6 May 11 2019 opt
dr-xr-xr-x. 135 root root 0 Jun 30 06:36 proc
dr-xr-x---. 2 root root 162 Jun 11 02:35 root
drwxr-xr-x. 11 root root 163 Jun 11 02:35 run
lrwxrwxrwx. 1 root root 8 May 11 2019 sbin -> usr/sbin
drwxr-xr-x. 2 root root 6 May 11 2019 srv
dr-xr-xr-x. 13 root root 0 Jun 30 02:47 sys
drwxrwxrwt. 7 root root 145 Jun 11 02:35 tmp
drwxr-xr-x. 12 root root 144 Jun 11 02:35 usr
drwxr-xr-x. 20 root root 262 Jun 11 02:35 var

实战 : Tomcat 镜像

  1. 准备镜像文件 tomcat压缩包 jdk的压缩包

    [root@Master tomcat]# ll
    总用量 198264
    -rw-r--r--. 1 root root 11200905 6月 30 14:46 apache-tomcat-9.0.36.tar.gz
    -rw-r--r--. 1 root root 191817140 6月 30 14:46 jdk-8u201-linux-x64.tar.gz
  2. 编写 dockerfile 文件,官方命名为 Dockerfile,build 时会自动寻找这个文件,就不需要用 -f 指定了

    FROM centos
    MAINTAINER jiang<1845124851@qq.com>

    COPY readme.txt /usr/local/readme.txt

    ADD jdk-8u201-linux-x64.tar.gz /usr/local/
    ADD apache-tomcat-9.0.36.tar.gz /usr/local/

    RUN yum -y install nano

    ENV MYPATH /usr/local

    WORKDIR $MYPATH

    ENV JAVA_HOME /usr/local/jdk1.8.0_201
    ENV CLASS_PATH $JAVA_HOME/lib/dt.jar;$JAVA_HOME/lib/tools.jar

    ENV CATALINA_HOME /usr/local/apache-tomcat-9.0.36
    ENV CATALINA_BASH /usr/local/apache-tomcat-9.0.36

    ENV PATH $PATH:$JAVA_HOME/bin;$CATALINA_HOME/lib;CATALINA_HOME/bin

    EXPOSE 8080

    CMD /usr/local/apache-tomcat-9.0.36/bin/startup.sh && tail -F /usr/local/apache-tomcat-9.0.36/bin/logs/catalina.out

  3. 构建镜像

    # docker build -t diytomcat .

    # 结果
    [root@Master tomcat]# docker build -t diytomcat .
    Sending build context to Docker daemon 203MB
    Step 1/15 : FROM centos
    ---> 831691599b88
    Step 2/15 : MAINTAINER jiang<1845124851@qq.com>
    ---> Using cache
    ---> 114da438a7cc
    Step 3/15 : COPY readme.txt /usr/local/readme.txt
    ---> faa2aca474e1
    Step 4/15 : ADD jdk-8u201-linux-x64.tar.gz /usr/local/
    ---> 097ef9175123
    Step 5/15 : ADD apache-tomcat-9.0.36.tar.gz /usr/local/
    ---> 94e4e44a089f
    Step 6/15 : RUN yum -y install nano
    ---> Running in 39596d95401e
    CentOS-8 - AppStream 1.8 MB/s | 5.8 MB 00:03
    CentOS-8 - Base 564 kB/s | 2.2 MB 00:04
    CentOS-8 - Extras 7.1 kB/s | 6.7 kB 00:00
    Dependencies resolved.
    ================================================================================
    Package Architecture Version Repository Size
    ================================================================================
    Installing:
    nano x86_64 2.9.8-1.el8 BaseOS 581 k

    Transaction Summary
    ================================================================================
    Install 1 Package

    Total download size: 581 k
    Installed size: 2.2 M
    Downloading Packages:
    nano-2.9.8-1.el8.x86_64.rpm 527 kB/s | 581 kB 00:01
    --------------------------------------------------------------------------------
    Total 117 kB/s | 581 kB 00:04
    warning: /var/cache/dnf/BaseOS-f6a80ba95cf937f2/packages/nano-2.9.8-1.el8.x86_64.rpm: Header V3 RSA/SHA256 Signature, key ID 8483c65d: NOKEY
    CentOS-8 - Base 1.1 MB/s | 1.6 kB 00:00
    Importing GPG key 0x8483C65D:
    Userid : "CentOS (CentOS Official Signing Key) <security@centos.org>"
    Fingerprint: 99DB 70FA E1D7 CE22 7FB6 4882 05B5 55B3 8483 C65D
    From : /etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
    Key imported successfully
    Running transaction check
    Transaction check succeeded.
    Running transaction test
    Transaction test succeeded.
    Running transaction
    Preparing : 1/1
    Installing : nano-2.9.8-1.el8.x86_64 1/1
    Running scriptlet: nano-2.9.8-1.el8.x86_64 1/1
    Verifying : nano-2.9.8-1.el8.x86_64 1/1

    Installed:
    nano-2.9.8-1.el8.x86_64

    Complete!
    Removing intermediate container 39596d95401e
    ---> 89b6438bbe17
    Step 7/15 : ENV MYPATH /usr/local
    ---> Running in f06501462526
    Removing intermediate container f06501462526
    ---> 8fc6958b3a6e
    Step 8/15 : WORKDIR $MYPATH
    ---> Running in fa73a1455658
    Removing intermediate container fa73a1455658
    ---> 1f69bc61e000
    Step 9/15 : ENV JAVA_HOME /usr/local/jdk1.8.0_201
    ---> Running in 7e6048372716
    Removing intermediate container 7e6048372716
    ---> bf3d54c57702
    Step 10/15 : ENV CLASS_PATH $JAVA_HOME/lib/dt.jar;$JAVA_HOME/lib/tools.jar
    ---> Running in bb1a6c75c0d9
    Removing intermediate container bb1a6c75c0d9
    ---> c5f2ecf75b2f
    Step 11/15 : ENV CATALINA_HOME /usr/local/apache-tomcat-9.0.36
    ---> Running in d9ee2e27d984
    Removing intermediate container d9ee2e27d984
    ---> a0dcffe52231
    Step 12/15 : ENV CATALINA_BASH /usr/local/apache-tomcat-9.0.36
    ---> Running in 461799d608d1
    Removing intermediate container 461799d608d1
    ---> d2496591c7bd
    Step 13/15 : ENV PATH $PATH:$JAVA_HOME/bin;$CATALINA_HOME/lib;CATALINA_HOME/bin
    ---> Running in 4ca62fc3160d
    Removing intermediate container 4ca62fc3160d
    ---> b17bfc8ba17f
    Step 14/15 : EXPOSE 8080
    ---> Running in b4505c081a84
    Removing intermediate container b4505c081a84
    ---> ed9783956463
    Step 15/15 : CMD /usr/local/apache-tomcat-9.0.36/bin/startup.sh && tail -F /usr/local/apache-tomcat-9.0.36/bin/logs/catalina.out
    ---> Running in 763c88837e33
    Removing intermediate container 763c88837e33
    ---> 67cbdc4556d3
    Successfully built 67cbdc4556d3
    Successfully tagged diytomcat:latest
  4. 启动镜像

    docker run -d -p 9090:8080 --name jiangtomcat -v /home/tomcat/test:/usr/local/apache-tomcat-9.0.36/webapps/test -v /home/tomcat/tomcatlogs/:/usr/local/apache-tomcat-9.0.36/logs diytomcat
  5. 访问测试(见本节末尾的示例)

  6. 发布项目

    <?xml version="1.0" encoding="UTF-8"?>
    <web-app xmlns="http://java.sun.com/xml/ns/javaee"
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="http://java.sun.com/xml/ns/javaee
    http://java.sun.com/xml/ns/javaee/web-app_2_5.xsd"
    version="2.5">

    </web-app>
    <%@ page language="java" contentType="text/html; charset=UTF-8"
    pageEncoding="UTF-8"%>
    <!DOCTYPE html>
    <html>
    <head>
    <meta charset="utf-8">
    <title>菜鸟教程(runoob.com)</title>
    </head>
    <body>
    Hello World!<br/>
    <%
    System.out.println("----- my test web logs-----");
    out.println("你的 IP 地址 " + request.getRemoteAddr());
    %>
    </body>
    </html>
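上面第 5 步的访问测试大致如下(端口沿用第 4 步映射的 9090,仅为示意):

# 访问 tomcat 默认页面
curl localhost:9090
# 在挂载目录 /home/tomcat/test 下新建 WEB-INF 目录放入上面的 web.xml,index.jsp 放在 test 根目录,然后访问项目
curl localhost:9090/test/
# 日志可以直接在宿主机挂载的 /home/tomcat/tomcatlogs/ 目录下查看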

发布镜像

Docker Hub

# docker login 命令登录docker

# 查看 login 命令的帮助
[root@Master tomcat]# docker login --help

Usage: docker login [OPTIONS] [SERVER]

Log in to a Docker registry.
If no server is specified, the default is defined by the daemon.

Options:
-p, --password string Password
--password-stdin Take the password from stdin
-u, --username string Username

# 登录
[root@Master tomcat]# docker login -u 1845124851
Password:
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store

Login Succeeded

# 提交镜像
[root@Master tomcat]# docker push 1845124851/diytomcat
The push refers to repository [docker.io/1845124851/diytomcat]
An image does not exist locally with the tag: 1845124851/diytomcat

# 如果出现如上问题的解决方法
# 增加一个 tag
[root@Master tomcat]# docker tag 67cbdc4556d3 1845124851/tomcat:1.0

# 提交
[root@Master tomcat]# docker push 1845124851/tomcat:1.0
The push refers to repository [docker.io/1845124851/tomcat]
4551daedb04f: Pushed
36ccc9c5fbf8: Pushing [==================================================>] 16.16MB
bca9fcc0c094: Pushed
d688492072a8: Pushed

提交到阿里云镜像服务器上

  1. 登录阿里云

    访问

  2. 找到容器镜像服务

  3. 创建命名空间

  4. 创建容器镜像

    # 登录阿里云docker 如果不能登录,访问 https://cr.console.aliyun.com/cn-hangzhou/instances/credentials 设置固定密码
    docker login --username=1845124851@qq.com registry.cn-hangzhou.aliyuncs.com

    # 先在本地给镜像打上带仓库地址的 tag,registry.cn-hangzhou.aliyuncs.com/jnssd 就是阿里云上配置的仓库地址;如果不加这个前缀,push 默认会提交到 Docker Hub
    docker tag 67cbdc4556d3 registry.cn-hangzhou.aliyuncs.com/jnssd/jnssd:1.0

    # 镜像的提交
    docker push registry.cn-hangzhou.aliyuncs.com/jnssd/jnssd:1.0
  5. 镜像的拉取

    docker pull registry.cn-hangzhou.aliyuncs.com/jnssd/jnssd:[镜像版本号]

Docker 网络

理解Docker0

清空所有docker容器和镜像

# 清空容器
docker rm -f $(docker ps -aq)

# 清空镜像信息
docker rmi -f $(docker images -aq)

测试

# 启动一个tomcat 
[root@Master ~]# docker run -d -P --name tomcat01 tomcat

# 查看容器的内部网络地址 ip addr 发现容器启动的时候会得到一个eth0@if5 ip地址
[root@Master ~]# docker exec -it tomcat01 ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
4: eth0@if5: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
link/ether 02:42:ac:11:00:02 brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet 172.17.0.2/16 brd 172.17.255.255 scope global eth0
valid_lft forever preferred_lft forever

[root@Master ~]# ping 172.17.0.2
PING 172.17.0.2 (172.17.0.2) 56(84) bytes of data.
64 bytes from 172.17.0.2: icmp_seq=1 ttl=64 time=0.039 ms
64 bytes from 172.17.0.2: icmp_seq=2 ttl=64 time=0.044 ms

# linux 可以ping 通docker容器内部

原理

  1. 我们每启动一个 docker 容器,docker 就会给容器分配一个 ip。我们只要安装了 docker,就会有一个网卡 docker0(桥接模式),使用的技术是 veth-pair 技术!

  2. 再启动一个容器测试,发现又多了一对网卡

    7: vethf0db58b@if6: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state UP group default 
    link/ether ae:42:be:a2:a2:c3 brd ff:ff:ff:ff:ff:ff link-netnsid 1
    inet6 fe80::ac42:beff:fea2:a2c3/64 scope link
    valid_lft forever preferred_lft forever

    # 我们发现这个容器带来的网卡,都是一对对的
    # veth-pair 就是一对虚拟设备接口,它们都是成对出现的,一端连着协议栈,一端彼此相连
    # 因为有这个特性,veth-pair 充当一个桥梁,连接各种虚拟网络设备
    # OpenStack、Docker 容器之间的连接、OVS 的连接,都是使用 veth-pair 技术
  3. 测试下tomcat01和tomcat02相互是否能够ping通

    [root@Master ~]# docker exec -it tomcat01 ping 172.17.0.3
    PING 172.17.0.3 (172.17.0.3) 56(84) bytes of data.
    64 bytes from 172.17.0.3: icmp_seq=1 ttl=64 time=0.202 ms
    64 bytes from 172.17.0.3: icmp_seq=2 ttl=64 time=0.059 ms
    64 bytes from 172.17.0.3: icmp_seq=3 ttl=64 time=0.055 ms
    64 bytes from 172.17.0.3: icmp_seq=4 ttl=64 time=0.056 ms
    ^C
    --- 172.17.0.3 ping statistics ---
    4 packets transmitted, 4 received, 0% packet loss, time 3ms
    rtt min/avg/max/mdev = 0.055/0.093/0.202/0.062 ms

    # 结论 容器和容器之间是可以相互ping通的

    结论:tomcat01 和tomcat02 都是共用的一个路由器,docker0

    所有的容器不指定网络的情况下,都是docker0路由的,docker会给容器分配一个默认的可用IP

    小结

    Docker 使用的是Linux的桥接,宿主机中是一个Docker容器的网桥docker0

    Docker 中的所有网络接口都是虚拟的,虚拟的转发效率高

    只要容器删除,对应的一对 veth-pair 虚拟网卡就删除了(docker0 网桥本身还在)

[root@Master ~]# docker exec -it tomcat01 ping tomcat02
ping: tomcat02: Name or service not known

# 如何解决
[root@Master ~]# docker run -d -P --name tomcat03 --link tomcat02 tomcat
87ad2f3cc1623a9e1aa5479a2ad8d2a5d85bfecbb9f58202fe9d93306a34c6d7
[root@Master ~]# docker exec -it tomcat03 ping tomcat02
PING tomcat02 (172.17.0.3) 56(84) bytes of data.
64 bytes from tomcat02 (172.17.0.3): icmp_seq=1 ttl=64 time=0.245 ms
64 bytes from tomcat02 (172.17.0.3): icmp_seq=2 ttl=64 time=0.056 ms
64 bytes from tomcat02 (172.17.0.3): icmp_seq=3 ttl=64 time=0.056 ms

# 反向ping
[root@Master ~]# docker exec -it tomcat02 ping tomcat03
ping: tomcat03: Name or service not known


# 其实tomcat03 就是在本地配置了tomcat02的配置
[root@Master ~]# docker exec -it tomcat03 cat /etc/hosts
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
172.17.0.3 tomcat02 babe95100fbe
172.17.0.4 87ad2f3cc162

本质探究:--link 就是在 hosts 配置中增加了一条 172.17.0.3 tomcat02 babe95100fbe

但是在 tomcat02 中没有对应配置,如果要互相访问,启动时双方都要使用 --link 连接,效率低下,不建议使用 --link

自定义网络,不使用 docker0

docker0 的问题:默认不支持通过容器名互相访问

自定义网络

查看所有的 docker 网络

[root@Master ~]# docker network ls
NETWORK ID NAME DRIVER SCOPE
01a782518f1d bridge bridge local
35e3df1b1282 host host local
4b4d05825849 none null local
[root@Master ~]#

网络模式

bridge:桥接模式(默认,自己创建的网络一般也使用 bridge 模式)

none:不配置网络

host:和宿主机共享网络

container:容器网络互通(用的少,局限大)

测试

# 我们直接启动的命令,默认就带有 --net bridge 参数,而这个 bridge 就是 docker0
docker run -d -P --name tomcat01 tomcat
docker run -d -P --name tomcat01 --net bridge tomcat

# docker0 特点,默认、域名不能访问、--link可以打通

# 自定义一个网络
# --driver bridge
# --subnet 192.168.0.0/16 子网,可用地址范围约为 192.168.0.2 - 192.168.255.254
# --gateway 192.168.0.1
[root@Master ~]# docker network create --driver bridge --subnet 192.168.0.0/16 --gateway 192.168.0.1 mynet
f4441cbd2d23b05fae43055ff959721dc88404e6cd04734751f9692d6cc0e5c0

[root@Master ~]# docker network ls
NETWORK ID NAME DRIVER SCOPE
01a782518f1d bridge bridge local
35e3df1b1282 host host local
f4441cbd2d23 mynet bridge local
4b4d05825849 none null local

# 查看创建的网络
[root@Master ~]# docker network inspect mynet
[
{
"Name": "mynet",
"Id": "f4441cbd2d23b05fae43055ff959721dc88404e6cd04734751f9692d6cc0e5c0",
"Created": "2020-07-01T14:51:12.97829315+08:00",
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": {},
"Config": [
{
"Subnet": "192.168.0.0/16",
"Gateway": "192.168.0.1"
}
]
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {},
"Options": {},
"Labels": {}
}
]

# 创建一个 tomcat-net-01
[root@Master ~]# docker run -d -P --name tomcat-net-01 --net mynet tomcat
ecc63c647175430885e3fb56ec7e4b1131ad83da20d78cef703f3191de42d4ed

# 创建一个 tomcat-net-02
[root@Master ~]# docker run -d -P --name tomcat-net-02 --net mynet tomcat
f615efee2ba8b9000f78712909b7f00fbfebf075075847b7b487a99dc9b083e3

# 查看自定义的网络配置
[root@Master ~]# docker network inspect mynet
[
{
"Name": "mynet",
"Id": "f4441cbd2d23b05fae43055ff959721dc88404e6cd04734751f9692d6cc0e5c0",
"Created": "2020-07-01T14:51:12.97829315+08:00",
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": {},
"Config": [
{
"Subnet": "192.168.0.0/16",
"Gateway": "192.168.0.1"
}
]
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {
"ecc63c647175430885e3fb56ec7e4b1131ad83da20d78cef703f3191de42d4ed": {
"Name": "tomcat-net-01",
"EndpointID": "ed39f17e261fd2ebe395d59b14a6a08128c951d526b295828ce6b42e27c00455",
"MacAddress": "02:42:c0:a8:00:02",
"IPv4Address": "192.168.0.2/16",
"IPv6Address": ""
},
"f615efee2ba8b9000f78712909b7f00fbfebf075075847b7b487a99dc9b083e3": {
"Name": "tomcat-net-02",
"EndpointID": "44e2f7596ca65e69005b801687ac86d5f8b5de4b9a5783b7eb5d5641f1ed818a",
"MacAddress": "02:42:c0:a8:00:03",
"IPv4Address": "192.168.0.3/16",
"IPv6Address": ""
}
},
"Options": {},
"Labels": {}
}
]

# ping 自定义网络里面另一个容器的 ip
[root@Master ~]# docker exec -it tomcat-net-01 ping 192.168.0.3
PING 192.168.0.3 (192.168.0.3) 56(84) bytes of data.
64 bytes from 192.168.0.3: icmp_seq=1 ttl=64 time=0.204 ms
64 bytes from 192.168.0.3: icmp_seq=2 ttl=64 time=0.054 ms
^C
--- 192.168.0.3 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1ms
rtt min/avg/max/mdev = 0.054/0.129/0.204/0.075 ms


# ping tomcat-net-02 的域名
[root@Master ~]# docker exec -it tomcat-net-01 ping tomcat-net-02
PING tomcat-net-02 (192.168.0.3) 56(84) bytes of data.
64 bytes from tomcat-net-02.mynet (192.168.0.3): icmp_seq=1 ttl=64 time=0.052 ms
64 bytes from tomcat-net-02.mynet (192.168.0.3): icmp_seq=2 ttl=64 time=0.058 ms
^C
--- tomcat-net-02 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 2ms
rtt min/avg/max/mdev = 0.052/0.055/0.058/0.003 ms

自定义的网络 docker 都已经帮我们维护好了对应的关系,推荐这样使用网络

好处:

redis - 不同的集群使用不同的网络,保证集群的安全和健康

mysql - 不同的集群使用不同的网络,保证集群的安全和健康

网络连通

# 测试 docker0 连接mynet
docker network connect mynet tomcat01

# 查看mynet 配置 发现 将 tomcat01放在了mynet网络下
# 即一个容器两个ip 类似 阿里云的 公网IP 内网IP
[root@Master ~]# docker network inspect mynet
[
{
"Name": "mynet",
"Id": "f4441cbd2d23b05fae43055ff959721dc88404e6cd04734751f9692d6cc0e5c0",
"Created": "2020-07-01T14:51:12.97829315+08:00",
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": {},
"Config": [
{
"Subnet": "192.168.0.0/16",
"Gateway": "192.168.0.1"
}
]
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {
"0db62ba71481ab8b7969e2b74564a93f5464effa71aee4368591062e431bf268": {
"Name": "tomcat01",
"EndpointID": "ee98b2eeff94c3fdf06d98318b5213fc2c2b95075eb3560cb975b8338536dc93",
"MacAddress": "02:42:c0:a8:00:04",
"IPv4Address": "192.168.0.4/16",
"IPv6Address": ""
},
"ecc63c647175430885e3fb56ec7e4b1131ad83da20d78cef703f3191de42d4ed": {
"Name": "tomcat-net-01",
"EndpointID": "ed39f17e261fd2ebe395d59b14a6a08128c951d526b295828ce6b42e27c00455",
"MacAddress": "02:42:c0:a8:00:02",
"IPv4Address": "192.168.0.2/16",
"IPv6Address": ""
},
"f615efee2ba8b9000f78712909b7f00fbfebf075075847b7b487a99dc9b083e3": {
"Name": "tomcat-net-02",
"EndpointID": "44e2f7596ca65e69005b801687ac86d5f8b5de4b9a5783b7eb5d5641f1ed818a",
"MacAddress": "02:42:c0:a8:00:03",
"IPv4Address": "192.168.0.3/16",
"IPv6Address": ""
}
},
"Options": {},
"Labels": {}
}
]

# 测试 tomcat01 ping tomcat-net-01
[root@Master ~]# docker exec -it tomcat01 ping tomcat-net-01
PING tomcat-net-01 (192.168.0.2) 56(84) bytes of data.
64 bytes from tomcat-net-01.mynet (192.168.0.2): icmp_seq=1 ttl=64 time=0.116 ms
64 bytes from tomcat-net-01.mynet (192.168.0.2): icmp_seq=2 ttl=64 time=0.063 ms
64 bytes from tomcat-net-01.mynet (192.168.0.2): icmp_seq=3 ttl=64 time=0.064 ms
^C
--- tomcat-net-01 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 1001ms
rtt min/avg/max/mdev = 0.063/0.081/0.116/0.024 ms

结论:假设要跨网络操作别的容器,就需要使用 docker network connect 进行连通

实战:部署redis 集群

# 1. 创建一个 redis 专用的网络
docker network create redis --subnet 172.135.0.0/16

# 查看是否创建
[root@Master ~]# docker network ls
NETWORK ID NAME DRIVER SCOPE
01a782518f1d bridge bridge local
35e3df1b1282 host host local
f4441cbd2d23 mynet bridge local
4b4d05825849 none null local
dce2416c2fab redis bridge local

# 2.通过脚本创建6个redis配置
for port in $(seq 1 6); \
do \
mkdir -p /mydata/redis/node-${port}/conf
touch /mydata/redis/node-${port}/conf/redis.conf
cat << EOF > /mydata/redis/node-${port}/conf/redis.conf
port 6379
bind 0.0.0.0
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
cluster-announce-ip 172.135.0.1${port}
cluster-announce-port 6379
cluster-announce-bus-port 16379
appendonly yes
EOF
done

# 启动容器的命令模板(${port} 取 1-6,可以放进上面的 for 循环执行;下面第 3 步给出逐个启动的完整命令)
docker run -p 637${port}:6379 -p 1637${port}:16379 --name redis-${port} \
-v /mydata/redis/node-${port}/data:/data \
-v /mydata/redis/node-${port}/conf/redis.conf:/etc/redis/redis.conf \
-d --net redis --ip 172.135.0.1${port} redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf

# 3.启动redis 容器
docker run -p 6371:6379 -p 16371:16379 --name redis-1 \
-v /mydata/redis/node-1/data:/data \
-v /mydata/redis/node-1/conf/redis.conf:/etc/redis/redis.conf \
-d --net redis --ip 172.135.0.11 redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf

docker run -p 6372:6379 -p 16372:16379 --name redis-2 \
-v /mydata/redis/node-2/data:/data \
-v /mydata/redis/node-2/conf/redis.conf:/etc/redis/redis.conf \
-d --net redis --ip 172.135.0.12 redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf

docker run -p 6373:6379 -p 16373:16379 --name redis-3 \
-v /mydata/redis/node-3/data:/data \
-v /mydata/redis/node-3/conf/redis.conf:/etc/redis/redis.conf \
-d --net redis --ip 172.135.0.13 redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf

docker run -p 6374:6379 -p 16374:16379 --name redis-4 \
-v /mydata/redis/node-4/data:/data \
-v /mydata/redis/node-4/conf/redis.conf:/etc/redis/redis.conf \
-d --net redis --ip 172.135.0.14 redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf

docker run -p 6375:6379 -p 16375:16379 --name redis-5 \
-v /mydata/redis/node-5/data:/data \
-v /mydata/redis/node-5/conf/redis.conf:/etc/redis/redis.conf \
-d --net redis --ip 172.135.0.15 redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf

docker run -p 6376:6379 -p 16376:16379 --name redis-6 \
-v /mydata/redis/node-6/data:/data \
-v /mydata/redis/node-6/conf/redis.conf:/etc/redis/redis.conf \
-d --net redis --ip 172.135.0.16 redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf

# 随便进入一个redis 容器中
[root@Master ~]# docker exec -it redis-1 /bin/sh
/data #

# 创建集群
redis-cli --cluster create 172.135.0.11:6379 172.135.0.12:6379 172.135.0.13:6379 172.135.0.14:6379 172.135.0.15:6379 172.135.0.16:6379 --cluster-replicas 1

# 创建集群后的回显
/data # redis-cli --cluster create 172.135.0.11:6379 172.135.0.12:6379 172.135.0.13:6379 172.135.0.14:6379 172.135.0.15:63
79 172.135.0.16:6379 --cluster-replicas 1
>>> Performing hash slots allocation on 6 nodes...
Master[0] -> Slots 0 - 5460
Master[1] -> Slots 5461 - 10922
Master[2] -> Slots 10923 - 16383
Adding replica 172.135.0.15:6379 to 172.135.0.11:6379
Adding replica 172.135.0.16:6379 to 172.135.0.12:6379
Adding replica 172.135.0.14:6379 to 172.135.0.13:6379
M: 5c4655d3feba365bec575654e9c59d7977e53153 172.135.0.11:6379
slots:[0-5460] (5461 slots) master
M: c82deef59581696248379b5cb9b3797e55dcde44 172.135.0.12:6379
slots:[5461-10922] (5462 slots) master
M: 3df847a434e97b12f55d6a8ad0b7bbb206db8647 172.135.0.13:6379
slots:[10923-16383] (5461 slots) master
S: 34c5a91dd48df6f576181daebe6b4cccd0fc66a1 172.135.0.14:6379
replicates 3df847a434e97b12f55d6a8ad0b7bbb206db8647
S: 237e0936b9c8dd26742c838446efe217cbc9035b 172.135.0.15:6379
replicates 5c4655d3feba365bec575654e9c59d7977e53153
S: 564557ac9f7ab039639478e1bef66e68c3e2ebcd 172.135.0.16:6379
replicates c82deef59581696248379b5cb9b3797e55dcde44
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join
...
>>> Performing Cluster Check (using node 172.135.0.11:6379)
M: 5c4655d3feba365bec575654e9c59d7977e53153 172.135.0.11:6379
slots:[0-5460] (5461 slots) master
1 additional replica(s)
M: c82deef59581696248379b5cb9b3797e55dcde44 172.135.0.12:6379
slots:[5461-10922] (5462 slots) master
1 additional replica(s)
S: 34c5a91dd48df6f576181daebe6b4cccd0fc66a1 172.135.0.14:6379
slots: (0 slots) slave
replicates 3df847a434e97b12f55d6a8ad0b7bbb206db8647
M: 3df847a434e97b12f55d6a8ad0b7bbb206db8647 172.135.0.13:6379
slots:[10923-16383] (5461 slots) master
1 additional replica(s)
S: 564557ac9f7ab039639478e1bef66e68c3e2ebcd 172.135.0.16:6379
slots: (0 slots) slave
replicates c82deef59581696248379b5cb9b3797e55dcde44
S: 237e0936b9c8dd26742c838446efe217cbc9035b 172.135.0.15:6379
slots: (0 slots) slave
replicates 5c4655d3feba365bec575654e9c59d7977e53153
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
/data #

# docker 搭建redis集群完成!

SpringBoot 打包 docker 镜像

  1. 构建springboot项目
  2. 打包应用
  3. 编写dockerfile(示例见下方)
  4. 构建镜像
  5. 发布运行
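一个最简单的 dockerfile 示例(假设把打包好的 jar 和 Dockerfile 放在同一目录,镜像名仅为示意):

FROM java:8
# 把打包好的 jar 拷贝进镜像
COPY *.jar /app.jar
EXPOSE 8080
ENTRYPOINT ["java","-jar","/app.jar"]

# 构建并运行
docker build -t springboot-demo .
docker run -d -p 8080:8080 --name springboot-demo springboot-demo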

Docker Compose

Docker Swarm

CI/CD 之Jenkins

docker管理工具