Docker 网络
[外链图片转存失败，源站可能有防盗链机制，建议将图片保存下来直接上传(img-fZzs1BHX-1653915420756)(…/…/图片/typora/111111111111111.jpg)]
veth-pair 是一对虚拟设备接口,和 tap/tun 不同的是,设备成对出现。一端与协议栈相连,另一端相互连接。
[root@testhost ~]# ip ad 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo valid_lft forever preferred_lft forever inet6 ::1/128 scope host valid_lft forever preferred_lft forever 2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000 link/ether 00:0c:29:bc:54:3c brd ff:ff:ff:ff:ff:ff inet 192.168.8.104/24 brd 192.168.8.255 scope global dynamic noprefixroute ens33 valid_lft 48185sec preferred_lft 48185sec inet6 fd20:da22:32d4:1e00:cda:9261:c8ac:4855/64 scope global dynamic noprefixroute valid_lft 7038sec preferred_lft 3438sec inet6 fe80::1370:ec5d:2644:b710/64 scope link noprefixroute valid_lft forever preferred_lft forever 3: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default link/ether 02:42:2b:64:5d:db brd ff:ff:ff:ff:ff:ff inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0 valid_lft forever preferred_lft forever inet6 fe80::42:2bff:fe64:5ddb/64 scope link valid_lft forever preferred_lft forever 5: vethd2aa05f@if4: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state UP group default link/ether 2e:35:ae:bf:c5:d3 brd ff:ff:ff:ff:ff:ff link-netnsid 0 inet6 fe80::2c35:aeff:febf:c5d3/64 scope link valid_lft forever preferred_lft forever 13: veth80149d4@if12: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state UP group default link/ether 5e:4e:de:5c:2c:07 brd ff:ff:ff:ff:ff:ff link-netnsid 2 inet6 fe80::5c4e:deff:fe5c:2c07/64 scope link valid_lft forever preferred_lft forever
– link
[root@testhost ~]# docker exec -it tomcat01 ping 172.17.0.3 PING 172.17.0.3 (172.17.0.3) 56(84) bytes of data. 64 bytes from 172.17.0.3: icmp_seq=1 ttl=64 time=0.107 ms 64 bytes from 172.17.0.3: icmp_seq=2 ttl=64 time=0.059 ms 64 bytes from 172.17.0.3: icmp_seq=3 ttl=64 time=0.078 ms 64 bytes from 172.17.0.3: icmp_seq=4 ttl=64 time=0.067 ms 64 bytes from 172.17.0.3: icmp_seq=5 ttl=64 time=0.045 ms 64 bytes from 172.17.0.3: icmp_seq=6 ttl=64 time=0.065 ms ^C --- 172.17.0.3 ping statistics --- 6 packets transmitted, 6 received, 0% packet loss, time 5125ms rtt min/avg/max/mdev = 0.045/0.070/0.107/0.019 ms [root@testhost ~]# docker exec -it tomcat01 ping lxftomcat ping: lxftomcat: Name or service not known [root@testhost ~]# docker run -d -P --name tomcat02 --link lxftomcat diytomcat ee6667300af7d609b21f824f9f9f39fca0bfb0f27c0c9dccf11e351c06072eb6 [root@testhost ~]# docker exec -it tomcat02 ping lxftomcat PING lxftomcat (172.17.0.3) 56(84) bytes of data. 64 bytes from lxftomcat (172.17.0.3): icmp_seq=1 ttl=64 time=0.131 ms 64 bytes from lxftomcat (172.17.0.3): icmp_seq=2 ttl=64 time=0.050 ms 64 bytes from lxftomcat (172.17.0.3): icmp_seq=3 ttl=64 time=0.044 ms 64 bytes from lxftomcat (172.17.0.3): icmp_seq=4 ttl=64 time=0.045 ms ^C --- lxftomcat ping statistics --- 4 packets transmitted, 4 received, 0% packet loss, time 3091ms rtt min/avg/max/mdev = 0.044/0.067/0.131/0.037 ms [root@testhost ~]# docker inspect ee6667300af7 | cat -n | grep lxf 63 "/lxftomcat:/tomcat02/lxftomcat" [root@testhost ~]# docker exec -it tomcat02 cat /etc/hosts 127.0.0.1 localhost ::1 localhost ip6-localhost ip6-loopback fe00::0 ip6-localnet ff00::0 ip6-mcastprefix ff02::1 ip6-allnodes ff02::2 ip6-allrouters 172.17.0.3 lxftomcat 497652168d31 172.17.0.5 ee6667300af7 [root@testhost ~]#
自定义网络
查看所有网络
[root@testhost ~]# docker network ls
NETWORK ID NAME DRIVER SCOPE
b04fa2d26754 bridge bridge local # 桥接模式
f749e9fa8a2c host host local # 主机模式
e6f409cc717e none null local # 不配置网络
container # 容器互联网络
# docker run -d -P --name tomcat01 --net bridge diytomcat 默认参数 --net bridge
[root@testhost ~]# docker network create --driver bridge --subnet 192.168.0.0/16 --gateway 192.168.0.1 mynet
53395142c4a8ff9e26df1de752c920f8db36c842a22eca3de3df140cf2d75046
[root@testhost ~]# docker network ls
NETWORK ID NAME DRIVER SCOPE
b04fa2d26754 bridge bridge local
f749e9fa8a2c host host local
53395142c4a8 mynet bridge local
e6f409cc717e none null local
[root@testhost ~]# docker network inspect 53395142c4a8
[
{
"Name": "mynet",
"Id": "53395142c4a8ff9e26df1de752c920f8db36c842a22eca3de3df140cf2d75046",
"Created": "2022-05-29T07:17:03.251644161+08:00",
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": {
},
"Config": [
{
"Subnet": "192.168.0.0/16",
"Gateway": "192.168.0.1"
}
]
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {
},
"Options": {
},
"Labels": {
}
}
]
[root@testhost ~]# ip add
20: br-53395142c4a8: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
link/ether 02:42:39:7f:49:4e brd ff:ff:ff:ff:ff:ff
inet 192.168.0.1/16 brd 192.168.255.255 scope global br-53395142c4a8
valid_lft forever preferred_lft forever
inet6 fe80::42:39ff:fe7f:494e/64 scope link
valid_lft forever preferred_lft forever
使用自己网络可以支持 ping 容器名称
[root@testhost ~]# docker run -d -P --name tomcat01 --net mynet diytomcat
cce9e9c7664a0e405f832967cd7957dffb1036b9fd12aa8bf34ce77ae1d89d2f
[root@testhost ~]# docker run -d -P --name tomcat02 --net mynet diytomcat
9ff4b8d9669d5c80ac482651b969b034fe3bcc78be771d274589829c73924a01
[root@testhost ~]# docker exec -it tomcat01 ping tomcat02
PING tomcat02 (192.168.0.3) 56(84) bytes of data.
64 bytes from tomcat02.mynet (192.168.0.3): icmp_seq=1 ttl=64 time=0.068 ms
64 bytes from tomcat02.mynet (192.168.0.3): icmp_seq=2 ttl=64 time=0.105 ms
^C
--- tomcat02 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1052ms
rtt min/avg/max/mdev = 0.068/0.086/0.105/0.020 ms
[root@testhost ~]# ping 192.168.0.3
PING 192.168.0.3 (192.168.0.3) 56(84) bytes of data.
64 bytes from 192.168.0.3: icmp_seq=1 ttl=64 time=0.062 ms
^C
--- 192.168.0.3 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.062/0.062/0.062/0.000 ms
[root@testhost ~]# ping 192.168.0.2
PING 192.168.0.2 (192.168.0.2) 56(84) bytes of data.
64 bytes from 192.168.0.2: icmp_seq=1 ttl=64 time=0.062 ms
^C
--- 192.168.0.2 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.062/0.062/0.062/0.000 ms
[root@testhost ~]#
docker 网络联通
# docker network connect mynet lxftomcat
[外链图片转存失败,源站可能有防盗链机制,建议将图片保存下来直接上传(img-sVXAEKHB-1653915420757)(…/…/图片/typora/1653751671852.png)]
练习 Redis 集群部署
# Generate per-node redis.conf for a 6-node Redis cluster
# (nodes announce themselves as 172.38.0.11 .. 172.38.0.16).
# Uses '>' (truncate) instead of 'touch' + '>>' so re-running the
# loop is idempotent and never appends duplicate directives.
for port in $(seq 1 6)
do
  mkdir -p "/root/redis/node-${port}/conf"
  # Heredoc delimiter is deliberately unquoted: ${port} must expand below.
  cat > "/root/redis/node-${port}/conf/redis.conf" <<EOF
port 6379
bind 0.0.0.0
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
cluster-announce-ip 172.38.0.1${port}
cluster-announce-port 6379
cluster-announce-bus-port 16379
appendonly yes
EOF
done
# Start the six Redis cluster nodes on the user-defined "redis" network.
# Node i is published on host ports 647i / 1637i, pinned to the static
# IP 172.38.0.1i, and bind-mounts its own data dir plus the generated
# redis.conf. Identical to running the six docker run commands by hand.
for i in 1 2 3 4 5 6
do
  docker run -p "647${i}:6379" -p "1637${i}:16379" --name "redis-${i}" \
    -v "/root/redis/node-${i}/data:/data" \
    -v "/root/redis/node-${i}/conf/redis.conf:/etc/redis/redis.conf" \
    -d --net redis --ip "172.38.0.1${i}" \
    redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf
done
docker exec -it redis-1 /bin/sh
redis-cli --cluster create 172.38.0.11:6379 172.38.0.12:6379 172.38.0.13:6379 172.38.0.14:6379 172.38.0.15:6379 172.38.0.16:6379 --cluster-replicas 1
>>> Performing hash slots allocation on 6 nodes...
Master[0] -> Slots 0 - 5460
Master[1] -> Slots 5461 - 10922
Master[2] -> Slots 10923 - 16383
Adding replica 172.38.0.15:6379 to 172.38.0.11:6379
Adding replica 172.38.0.16:6379 to 172.38.0.12:6379
Adding replica 172.38.0.14:6379 to 172.38.0.13:6379
M: adc7a4b756a86c0e14c52e68ddc80355654dc073 172.38.0.11:6379
slots:[0-5460] (5461 slots) master
M: 2258330dac90eb5324963bbeb5a6ab33579584cc 172.38.0.12:6379
slots:[5461-10922] (5462 slots) master
M: 9904a350bd3b35ffeaa1de3a3ff7051569c25ff8 172.38.0.13:6379
slots:[10923-16383] (5461 slots) master
S: dfc4412e495e1ec6c37fe71e003e4a49976d80ac 172.38.0.14:6379
replicates 9904a350bd3b35ffeaa1de3a3ff7051569c25ff8
S: 8fc9bad38b81bf4831650b93275869ee28e2e5a2 172.38.0.15:6379
replicates adc7a4b756a86c0e14c52e68ddc80355654dc073
S: ea3071f1260e9af6212fe274e698da64e6b636af 172.38.0.16:6379
replicates 2258330dac90eb5324963bbeb5a6ab33579584cc
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join
....
>>> Performing Cluster Check (using node 172.38.0.11:6379)
M: adc7a4b756a86c0e14c52e68ddc80355654dc073 172.38.0.11:6379
slots:[0-5460] (5461 slots) master
1 additional replica(s)
S: 8fc9bad38b81bf4831650b93275869ee28e2e5a2 172.38.0.15:6379
slots: (0 slots) slave
replicates adc7a4b756a86c0e14c52e68ddc80355654dc073
S: ea3071f1260e9af6212fe274e698da64e6b636af 172.38.0.16:6379
slots: (0 slots) slave
replicates 2258330dac90eb5324963bbeb5a6ab33579584cc
S: dfc4412e495e1ec6c37fe71e003e4a49976d80ac 172.38.0.14:6379
slots: (0 slots) slave
replicates 9904a350bd3b35ffeaa1de3a3ff7051569c25ff8
M: 9904a350bd3b35ffeaa1de3a3ff7051569c25ff8 172.38.0.13:6379
slots:[10923-16383] (5461 slots) master
1 additional replica(s)
M: 2258330dac90eb5324963bbeb5a6ab33579584cc 172.38.0.12:6379
slots:[5461-10922] (5462 slots) master
1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
/data # redis-cli -c
127.0.0.1:6379> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:1
cluster_stats_messages_ping_sent:62
cluster_stats_messages_pong_sent:64
cluster_stats_messages_sent:126
cluster_stats_messages_ping_received:59
cluster_stats_messages_pong_received:62
cluster_stats_messages_meet_received:5
cluster_stats_messages_received:126
127.0.0.1:6379> cluster nodes
adc7a4b756a86c0e14c52e68ddc80355654dc073 172.38.0.11:6379@16379 myself,master - 0 1653783937000 1 connected 0-5460
8fc9bad38b81bf4831650b93275869ee28e2e5a2 172.38.0.15:6379@16379 slave adc7a4b756a86c0e14c52e68ddc80355654dc073 0 1653783938000 5 connected
ea3071f1260e9af6212fe274e698da64e6b636af 172.38.0.16:6379@16379 slave 2258330dac90eb5324963bbeb5a6ab33579584cc 0 1653783937500 6 connected
dfc4412e495e1ec6c37fe71e003e4a49976d80ac 172.38.0.14:6379@16379 slave 9904a350bd3b35ffeaa1de3a3ff7051569c25ff8 0 1653783937000 4 connected
9904a350bd3b35ffeaa1de3a3ff7051569c25ff8 172.38.0.13:6379@16379 master - 0 1653783938513 3 connected 10923-16383
2258330dac90eb5324963bbeb5a6ab33579584cc 172.38.0.12:6379@16379 master - 0 1653783937000 2 connected 5461-10922
127.0.0.1:6379>
127.0.0.1:6379> set a b
-> Redirected to slot [15495] located at 172.38.0.13:6379
OK
172.38.0.13:6379> get a
^C
/data # redis-cli -c
127.0.0.1:6379> get a
-> Redirected to slot [15495] located at 172.38.0.14:6379
"b"
172.38.0.14:6379>
172.38.0.14:6379> cluster nodes
9904a350bd3b35ffeaa1de3a3ff7051569c25ff8 172.38.0.13:6379@16379 master,fail - 1653784002739 1653784002536 3 connected
2258330dac90eb5324963bbeb5a6ab33579584cc 172.38.0.12:6379@16379 master - 0 1653784064586 2 connected 5461-10922
8fc9bad38b81bf4831650b93275869ee28e2e5a2 172.38.0.15:6379@16379 slave adc7a4b756a86c0e14c52e68ddc80355654dc073 0 1653784064995 5 connected
ea3071f1260e9af6212fe274e698da64e6b636af 172.38.0.16:6379@16379 slave 2258330dac90eb5324963bbeb5a6ab33579584cc 0 1653784064000 6 connected
dfc4412e495e1ec6c37fe71e003e4a49976d80ac 172.38.0.14:6379@16379 myself,master - 0 1653784063000 7 connected 10923-16383
adc7a4b756a86c0e14c52e68ddc80355654dc073 172.38.0.11:6379@16379 master - 0 1653784065503 1 connected 0-5460
172.38.0.14:6379>
Docker Compose
Docker Compose |概述Docker 文档
简介
Compose 是用于定义和运行多容器 Docker 应用程序的工具。通过 Compose,您可以使用 YML 文件来配置应用程序需要的所有服务。然后,使用一个命令,就可以从 YML 文件配置中创建并启动所有服务。
Compose 使用的三个步骤:
- 使用 Dockerfile 定义应用程序的环境。
- 使用 docker-compose.yml 定义构成应用程序的服务,这样它们可以在隔离环境中一起运行。
- 最后,执行 docker-compose up 命令来启动并运行整个应用程序。
作用:批量容器编排
安装
先决条件
Docker Compose 依靠 Docker Engine 进行任何有意义的工作,因此请确保在本地或远程安装了 Docker 引擎,具体取决于您的设置。
- 在桌面系统上,如Docker Desktop for Mac和Windows,Docker Compose作为这些桌面安装的一部分包含在内。
- 在 Linux 系统上,您可以使用方便脚本使用 Docker 引擎安装 Docker Compose。为您的发行版选择安装 Docker 引擎页面,然后查找有关使用方便脚本进行安装的说明。 否则,您应该首先为您的操作系统安装 Docker 引擎,然后参阅此页面以获取有关在 Linux 系统上安装 Compose 的说明。
- 要以非 root 用户身份运行 Compose,请参阅以非 root 用户身份管理 Docker。
安装 Compose
按照以下说明在 Mac、Windows、Windows Server 或 Linux 系统上安装 Compose。
安装其他版本
以下说明概述了 Compose 当前稳定版本的安装。若要安装其他版本的 Compose，请将给定的版本号替换为所需的版本号。
Compose 的各个版本也会在 GitHub 上的 Compose 存储库 Releases 页面上列出，可以直接下载。
若要安装 Python 版本的 Compose,请按照 Compose v1 GitHub 分支中的说明进行操作。
-
Mac
-
Windows
-
Windows Server
-
Linux
-
Linux 独立二进制文件
在 Linux 系统上安装 Compose
您可以根据需要以不同的方式安装 Docker Compose:
- 在测试和开发环境中,一些用户选择使用自动化的便利脚本来安装 Docker。
- 大多数用户设置 Docker 的存储库并从中安装,以便于安装和升级任务。这是推荐的方法。
- 某些用户下载并安装二进制文件,并手动管理升级。
使用便利脚本进行安装
由于 Docker Compose 现在是 Docker CLI 的一部分,因此可以通过带有 Docker 引擎和 CLI 的便利脚本进行安装。 选择您的 Linux 发行版,然后按照说明进行操作。
使用存储库安装
如果您已经按照说明安装 Docker 引擎,则应该已经安装了 Docker Compose。 否则,您可以按照 Docker 引擎安装中提到的方式设置 Docker 存储库,选择您的 Linux 发行版并转到该部分。Set up the repository
完成后
-
更新包索引,并安装最新版本的 Docker Compose,或转到下一步以安装特定版本:
apt
$ sudo apt-get update
$ sudo apt-get install docker-compose-plugin
-
要安装特定版本的 Docker 引擎,请在存储库中列出可用版本,然后选择并安装:
一个。列出存储库中可用的版本:
$ apt-cache madison docker-compose-plugin
docker-compose-plugin | 2.3.3~ubuntu-focal | https://download.docker.com/linux/ubuntu focal/stable arm64 Packages
b.使用第二列中的版本字符串安装特定版本,例如 。
2.3.3~ubuntu-focal
$ sudo apt-get install docker-compose-plugin=<VERSION_STRING>
-
通过检查版本来验证 Docker Compose 是否已正确安装。
$ docker compose version
Docker Compose version v2.3.3
手动安装二进制文件
在 Linux 上，您可以从 GitHub 上的 Compose 存储库 Releases 页面下载 Docker Compose 二进制文件，并将其复制为 $HOME/.docker/cli-plugins/docker-compose。按照链接中的说明进行操作，这涉及在终端中运行 curl 命令以下载二进制文件。下面也包括这些分步说明。
-
运行以下命令以下载 Docker Compose 的当前稳定版本:
$ DOCKER_CONFIG=${DOCKER_CONFIG:-$HOME/.docker}
$ mkdir -p $DOCKER_CONFIG/cli-plugins
$ curl -SL https://get.daocloud.io/docker/compose/releases/download/v2.5.0/docker-compose-linux-x86_64 -o $DOCKER_CONFIG/cli-plugins/docker-compose
[外链图片转存失败,源站可能有防盗链机制,建议将图片保存下来直接上传(img-x56xl8Eu-1653915420757)(…/…/图片/typora/1653792936026.png)]
此命令在 $HOME/.docker/cli-plugins 目录下为当前用户安装 Compose。要为系统上的所有用户安装 Docker Compose，请将 ~/.docker/cli-plugins 替换为 /usr/local/lib/docker/cli-plugins。
若要安装其他版本的 Compose,请使用要使用的 Compose 版本进行替换。
v2.5.0
-
将可执行权限应用于二进制文件:
$ chmod +x $DOCKER_CONFIG/cli-plugins/docker-compose
或者，如果您选择为所有用户安装 Compose
$ sudo chmod +x /usr/local/lib/docker/cli-plugins/docker-compose
-
测试安装。
$ docker compose version
Docker Compose version v2.5.0
[外链图片转存失败,源站可能有防盗链机制,建议将图片保存下来直接上传(img-1e9zwiUl-1653915420758)(…/…/图片/typora/1653793029364.png)]
卸载
要卸载 Docker Compose（如果您使用 curl 安装）:
$ rm $DOCKER_CONFIG/cli-plugins/docker-compose
或者，如果您选择为所有用户安装 Compose
$ sudo rm /usr/local/lib/docker/cli-plugins/docker-compose
出现“权限被拒绝”错误?
如果您在使用上述任一方法时收到“权限被拒绝”错误，则您可能没有删除 docker-compose 的适当权限。要强制删除，请在上述任一命令前加上 sudo，然后再次运行。
体验
-
应用app.py
import time import redis from flask import Flask app = Flask(__name__) cache = redis.Redis(host='redis', port=6379) def get_hit_count(): retries = 5 while True: try: return cache.incr('hits') except redis.exceptions.ConnectionError as exc: if retries == 0: raise exc retries -= 1 time.sleep(0.5) @app.route('/') def hello(): count = get_hit_count() return 'Hello World! I have been seen {} times.\n'.format(count)
-
dockerfile 应用打包成镜像
# syntax=docker/dockerfile:1
FROM python:3.7-alpine
WORKDIR /code
ENV FLASK_APP=app.py
ENV FLASK_RUN_HOST=0.0.0.0
RUN apk add --no-cache gcc musl-dev linux-headers
COPY requirements.txt requirements.txt
RUN pip install -r requirements.txt
EXPOSE 5000
COPY . .
CMD ["flask", "run"]
-
docker compose yml 文件
version: "3.9"
services:
  web:
    build: .
    ports:
      - "8000:5000"
  redis:
    image: "redis:alpine"
-
启动compose 项目
流程:
- 创建网络
- 执行Docker compose yaml
- 启动服务
[root@testhost composetest]# docker compose up
[+] Running 7/7
⠿ redis Pulled 19.7s
⠿ 59bf1c3509f3 Pull complete 5.8s
⠿ 719adce26c52 Pull complete 6.0s
⠿ b8f35e378c31 Pull complete 6.9s
⠿ d034517f789c Pull complete 8.2s
⠿ 3772d4d76753 Pull complete 8.3s
⠿ 211a7f52febb Pull complete 8.4s
[+] Building 462.1s (15/15) FINISHED
=> [internal] load build definition from Dockerfile 0.0s
=> => transferring dockerfile: 320B 0.0s
=> [internal] load .dockerignore 0.0s
=> => transferring context: 2B 0.0s
=> resolve image config for docker.io/docker/dockerfile:1 17.9s
=> docker-image://docker.io/docker/dockerfile:1@sha256:42399d4635eddd7a9b8a24be879d2f9a930d0ed040a61324cfdf59 2.1s
=> => resolve docker.io/docker/dockerfile:1@sha256:42399d4635eddd7a9b8a24be879d2f9a930d0ed040a61324cfdf59ef13 0.0s
=> => sha256:42399d4635eddd7a9b8a24be879d2f9a930d0ed040a61324cfdf59ef1357b3b2 2.00kB / 2.00kB 0.0s
=> => sha256:93f32bd6dd9004897fed4703191f48924975081860667932a4df35ba567d7426 528B / 528B 0.0s
=> => sha256:e532695ddd93ca7c85a816c67afdb352e91052fab7ac19a675088f80915779a7 1.21kB / 1.21kB 0.0s
=> => sha256:24a639a53085eb680e1d11618ac62f3977a3926fedf5b8471ace519b8c778030 9.67MB / 9.67MB 1.3s
=> => extracting sha256:24a639a53085eb680e1d11618ac62f3977a3926fedf5b8471ace519b8c778030 0.7s
=> [internal] load .dockerignore 0.0s
=> [internal] load build definition from Dockerfile 0.0s
=> [internal] load metadata for docker.io/library/python:3.7-alpine 20.0s
=> [1/6] FROM docker.io/library/python:3.7-alpine@sha256:d64e0124674d64e78cc9d7378a1130499ced66a7a00db0521d01 4.1s
=> => resolve docker.io/library/python:3.7-alpine@sha256:d64e0124674d64e78cc9d7378a1130499ced66a7a00db0521d01 0.0s
=> => sha256:a1034fd13493569ef2975c7c8c3bdd8aecee43aa2a9f102a24380b9cb724535b 8.10kB / 8.10kB 0.0s
=> => sha256:8786870f287676cca49c1e1e5029467c087ad293b824fcddc489cbe2819745b2 282.15kB / 282.15kB 0.7s
=> => sha256:45d4696938d0cf96f85ae2d9a8eb8c0bb018e07b0f2a820c3118a7cb54e4dad8 10.55MB / 10.55MB 1.8s
=> => sha256:ef84af58b2c5560afbdd9265a475c59fe10c72032153ab8a79e77b961985c580 230B / 230B 0.3s
=> => sha256:d64e0124674d64e78cc9d7378a1130499ced66a7a00db0521d0120a2e88ac9e4 1.65kB / 1.65kB 0.0s
=> => sha256:98bdc8dd3aa5157f990a13a9cb58d4f9b8d089ae825c0d85b9d68e0c5f1918bd 1.37kB / 1.37kB 0.0s
=> => sha256:c3c9b71b9a6957b500a5a45f5608c0099a8e7c9ba0714701cd041198d95b319b 2.35MB / 2.35MB 1.7s
=> => extracting sha256:8786870f287676cca49c1e1e5029467c087ad293b824fcddc489cbe2819745b2 0.2s
=> => extracting sha256:45d4696938d0cf96f85ae2d9a8eb8c0bb018e07b0f2a820c3118a7cb54e4dad8 1.7s
=> => extracting sha256:ef84af58b2c5560afbdd9265a475c59fe10c72032153ab8a79e77b961985c580 0.0s
=> => extracting sha256:c3c9b71b9a6957b500a5a45f5608c0099a8e7c9ba0714701cd041198d95b319b 0.4s
=> [internal] load build context 0.0s
=> => transferring context: 1.09kB 0.0s
=> [2/6] WORKDIR /code 0.3s
=> [3/6] RUN apk add --no-cache gcc musl-dev linux-headers 396.5s
=> [4/6] COPY requirements.txt requirements.txt 0.1s
=> [5/6] RUN pip install -r requirements.txt 19.7s
=> [6/6] COPY . . 0.1s
=> exporting to image 1.1s
=> => exporting layers 1.1s
=> => writing image sha256:1cb2591260901eaa54b43729cdbc7f096d5ae66e4bcb39e478c36bc7d90fcfc1 0.0s
=> => naming to docker.io/library/composetest_web 0.0s
Use 'docker scan' to run Snyk tests against images to find vulnerabilities and learn how to fix them
[+] Running 3/3
⠿ Network composetest_default Created 0.2s
⠿ Container composetest-redis-1 Created 0.1s
⠿ Container composetest-web-1 Created 0.1s
Attaching to composetest-redis-1, composetest-web-1
composetest-redis-1 | 1:C 29 May 2022 11:15:48.727 # oO0OoO0OoO0Oo Redis is starting oO0OoO0OoO0Oo
composetest-redis-1 | 1:C 29 May 2022 11:15:48.727 # Redis version=6.2.6, bits=64, commit=00000000, modified=0, pid=1, just started
composetest-redis-1 | 1:C 29 May 2022 11:15:48.727 # Warning: no config file specified, using the default config. In order to specify a config file use redis-server /path/to/redis.conf
composetest-redis-1 | 1:M 29 May 2022 11:15:48.732 * monotonic clock: POSIX clock_gettime
composetest-redis-1 | 1:M 29 May 2022 11:15:48.733 * Running mode=standalone, port=6379.
composetest-redis-1 | 1:M 29 May 2022 11:15:48.733 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128.
composetest-redis-1 | 1:M 29 May 2022 11:15:48.733 # Server initialized
composetest-redis-1 | 1:M 29 May 2022 11:15:48.733 # WARNING overcommit_memory is set to 0! Background save may fail under low memory condition. To fix this issue add 'vm.overcommit_memory = 1' to /etc/sysctl.conf and then reboot or run the command 'sysctl vm.overcommit_memory=1' for this to take effect.
composetest-redis-1 | 1:M 29 May 2022 11:15:48.733 * Ready to accept connections
composetest-web-1 | * Serving Flask app 'app.py' (lazy loading)
composetest-web-1 | * Environment: production
composetest-web-1 | WARNING: This is a development server. Do not use it in a production deployment.
composetest-web-1 | Use a production WSGI server instead.
composetest-web-1 | * Debug mode: off
composetest-web-1 | * Running on all addresses (0.0.0.0)
composetest-web-1 | WARNING: This is a development server. Do not use it in a production deployment.
composetest-web-1 | * Running on http://127.0.0.1:5000
composetest-web-1 | * Running on http://172.18.0.2:5000 (Press CTRL+C to quit)
composetest-web-1 | 172.18.0.1 - - [29/May/2022 11:16:54] "GET / HTTP/1.1" 200 -
composetest-web-1 | 172.18.0.1 - - [29/May/2022 11:16:54] "GET /favicon.ico HTTP/1.1" 404 -
composetest-web-1 | 172.18.0.1 - - [29/May/2022 11:17:38] "GET / HTTP/1.1" 200 -
composetest-web-1 | 172.18.0.1 - - [29/May/2022 11:17:39] "GET / HTTP/1.1" 200 -
composetest-web-1 | 172.18.0.1 - - [29/May/2022 11:17:39] "GET / HTTP/1.1" 200 -
composetest-web-1 | 172.18.0.1 - - [29/May/2022 11:17:40] "GET / HTTP/1.1" 200 -
composetest-web-1 | 172.18.0.1 - - [29/May/2022 11:17:40] "GET / HTTP/1.1" 200 -
composetest-web-1 | 172.18.0.1 - - [29/May/2022 11:17:41] "GET / HTTP/1.1" 200 -
composetest-web-1 | 172.18.0.1 - - [29/May/2022 11:17:41] "GET / HTTP/1.1" 200 -
composetest-web-1 | 172.18.0.1 - - [29/May/2022 11:17:42] "GET / HTTP/1.1" 200 -
composetest-web-1 | 172.18.0.1 - - [29/May/2022 11:17:42] "GET / HTTP/1.1" 200 -
composetest-web-1 | 172.18.0.1 - - [29/May/2022 11:17:42] "GET / HTTP/1.1" 200 -
composetest-web-1 | 172.18.0.1 - - [29/May/2022 11:17:43] "GET / HTTP/1.1" 200 -
composetest-web-1 | 172.18.0.1 - - [29/May/2022 11:17:43] "GET / HTTP/1.1" 200 -
composetest-web-1 | 172.18.0.1 - - [29/May/2022 11:17:43] "GET / HTTP/1.1" 200 -
composetest-web-1 | 172.18.0.1 - - [29/May/2022 11:17:43] "GET / HTTP/1.1" 200 -
composetest-web-1 | 172.18.0.1 - - [29/May/2022 11:17:43] "GET / HTTP/1.1" 200 -
停止服务 ctrl c
在项目目录下 docker compose down
^CGracefully stopping... (press Ctrl+C again to force)
[+] Running 2/2
⠿ Container composetest-web-1 Stopped 10.3s
⠿ Container composetest-redis-1 Stopped 0.2s
canceled
[root@testhost composetest]#
yaml文件
https://docs.docker.com/compose/compose-file/
下面的示例通过一个具体的示例应用程序来说明 Compose 规范的概念。该示例是非规范性的。
# Sample Compose file: three services, each showing a different way to
# combine `image` and `build` (indentation restored; flattened by extraction).
services:
  frontend:
    image: awesome/webapp    # tag applied to the image built from ./webapp
    build: ./webapp          # short syntax: build context only
  backend:
    image: awesome/database
    build:
      context: backend                     # build context directory
      dockerfile: ../backend.Dockerfile    # resolved relative to the context
  custom:
    build: ~/custom          # non-portable path; no image tag, so not pushed
当用于从源构建服务映像时,这样的 Compose 文件将创建三个 docker 映像:
awesome/webapp
docker 映像是使用 Compose 文件父文件夹中的 webapp 子目录作为 docker 构建上下文构建的。该文件夹中缺少 Dockerfile 将引发错误。
awesome/database
docker 映像是使用 Compose 文件父文件夹中的 backend 子目录构建的。backend.Dockerfile 文件用于定义构建步骤，该文件相对于上下文路径（backend）解析，这意味着对于此示例，.. 将解析为 Compose 文件父文件夹，因此 backend.Dockerfile 是其同级文件。
- Docker 映像是使用用户 HOME 中的目录作为 docker 上下文构建的。撰写实现警告用户用于构建映像的非可移植路径。
custom
在推送时，awesome/webapp 和 awesome/database 两个 Docker 映像都将推送到（默认）注册表。custom 服务映像将被跳过，因为它未设置 image 属性，并且会警告用户缺少该属性。
生成定义
该元素定义 Compose 实现应用于从源构建 Docker 映像的配置选项。 可以指定为包含构建上下文路径的字符串或详细结构:build``build
services:
webapp:
build: ./dir
使用此字符串语法,只能将生成上下文配置为 Compose 文件父文件夹的相对路径。此路径必须是目录并包含 .Dockerfile
或者,可以是具有以下定义字段的对象build
上下文(必填)
context
定义指向包含 Dockerfile 的目录的路径,或指向 git 存储库的 url。
当提供的值是相对路径时,必须将其解释为相对于 Compose 文件的位置。Compose 实现必须警告用户用于定义构建上下文的绝对路径,因为这些路径会阻止 Compose 文件可移植。
build:
context: ./dir
dockerfile
dockerfile
允许设置备用 Dockerfile。必须从生成上下文解析相对路径。撰写实现必须警告用户用于定义Dockerfile的绝对路径,因为这些路径会阻止撰写文件可移植。
build:
context: .
dockerfile: webapp.Dockerfile
参数
args 定义构建参数，即 Dockerfile 的 ARG 值。
使用以下 Dockerfile:
ARG GIT_COMMIT
RUN echo "Based on commit: $GIT_COMMIT"
args 可以在 Compose 文件的 build 键下设置以定义 GIT_COMMIT。args 可以设置为映射或列表：
build:
context: .
args:
GIT_COMMIT: cdc3b19
build:
context: .
args:
- GIT_COMMIT=cdc3b19
指定构建参数时可以省略 Value,在这种情况下,必须在用户交互中获取其在构建时的值,否则在构建 Docker 映像时不会设置 build arg。
args:
- GIT_COMMIT
断续器
ssh
定义了映像生成器在映像构建期间应使用的 SSH 身份验证(例如,克隆专用存储库)
ssh
属性语法可以是:
default
- 让构建器连接到 ssh 代理。ID=path
- ID 和关联路径的键/值定义。可以是 PEM 文件,也可以是 ssh 代理套接字的路径
简单示例default
build:
context: .
ssh:
- default # mount the default ssh agent
或
build:
context: .
ssh: ["default"] # mount the default ssh agent
使用具有本地 SSH 密钥路径的自定义 ID:myproject
build:
context: .
ssh:
- myproject=~/.ssh/myproject.pem
然后,映像生成器可以依靠此在生成期间装载 SSH 密钥。为了便于说明,BuildKit 扩展语法可用于挂载由 ID 设置的 ssh 密钥并访问受保护的资源:
RUN --mount=type=ssh,id=myproject git clone ...
cache_from
cache_from
定义了映像生成器用于缓存分辨率的源列表。
缓存位置语法必须遵循全局格式 。简单实际上是 的快捷方式表示法。[NAME|type=TYPE[,KEY=VALUE]]``NAME``type=registry,ref=NAME
撰写生成器实现可能支持自定义类型,撰写规范定义了必须支持的规范类型:
registry
从按键设置的 OCI 映像中检索构建缓存ref
build:
context: .
cache_from:
- alpine:latest
- type=local,src=path/to/cache
- type=gha
必须忽略不受支持的缓存,并且不能阻止用户生成映像。
cache_to
cache_to
定义要用于与将来的生成共享生成缓存的导出位置列表。
build:
context: .
cache_to:
- user/app:cache
- type=local,dest=path/to/cache
缓存目标使用cache_from
定义的相同语法进行定义。type=TYPE[,KEY=VALUE]
必须忽略不受支持的缓存目标,并且不能阻止用户构建映像。
extra_hosts
extra_hosts
在构建时添加主机名映射。使用与extra_hosts相同的语法。
extra_hosts:
- "somehost:162.242.195.82"
- "otherhost:50.31.209.229"
编写实现必须在容器的网络配置中创建具有IP地址和主机名的匹配条目,这意味着对于Linux将获得额外的行:/etc/hosts
162.242.195.82 somehost
50.31.209.229 otherhost
隔离
isolation
指定生成的容器隔离技术。像隔离支持的值是特定于平台的。
标签
labels 将元数据添加到生成的镜像。labels 可以设置为数组或映射。
应使用反向 DNS 表示法来防止标签与其他软件使用的标签冲突。
build:
context: .
labels:
com.example.description: "Accounting webapp"
com.example.department: "F