Merge pull request #3612 from thaJeztah/daemon_cluster_opts

remove documentation and completion for deprecated legacy overlay networks
This commit is contained in:
Sebastiaan van Stijn 2022-05-17 14:30:49 +02:00 committed by GitHub
commit e55151fb7c
18 changed files with 185 additions and 259 deletions

View File

@ -321,8 +321,6 @@ func prettyPrintServerInfo(dockerCli command.Cli, info types.Info) []error {
}
fmt.Fprintln(dockerCli.Out(), " Experimental:", info.ExperimentalBuild)
fprintlnNonEmpty(dockerCli.Out(), " Cluster Store:", info.ClusterStore)
fprintlnNonEmpty(dockerCli.Out(), " Cluster Advertise:", info.ClusterAdvertise)
if info.RegistryConfig != nil && (len(info.RegistryConfig.InsecureRegistryCIDRs) > 0 || len(info.RegistryConfig.IndexConfigs) > 0) {
fmt.Fprintln(dockerCli.Out(), " Insecure Registries:")

View File

@ -98,8 +98,6 @@ var sampleInfoNoSwarm = types.Info{
Labels: []string{"provider=digitalocean"},
ExperimentalBuild: false,
ServerVersion: "17.06.1-ce",
ClusterStore: "",
ClusterAdvertise: "",
Runtimes: map[string]types.Runtime{
"runc": {
Path: "docker-runc",

View File

@ -2523,9 +2523,6 @@ _docker_daemon() {
--bip
--bridge -b
--cgroup-parent
--cluster-advertise
--cluster-store
--cluster-store-opt
--config-file
--containerd
--containerd-namespace
@ -2574,15 +2571,6 @@ _docker_daemon() {
__docker_complete_log_driver_options && return
key=$(__docker_map_key_of_current_option '--cluster-store-opt')
case "$key" in
kv.*file)
cur=${cur##*=}
_filedir
return
;;
esac
local key=$(__docker_map_key_of_current_option '--storage-opt')
case "$key" in
dm.blkdiscard|dm.override_udev_sync_check|dm.use_deferred_removal|dm.use_deferred_deletion)
@ -2609,16 +2597,6 @@ _docker_daemon() {
__docker_complete_plugins_bundled --type Authorization
return
;;
--cluster-store)
COMPREPLY=( $( compgen -W "consul etcd zk" -S "://" -- "$cur" ) )
__docker_nospace
return
;;
--cluster-store-opt)
COMPREPLY=( $( compgen -W "discovery.heartbeat discovery.ttl kv.cacertfile kv.certfile kv.keyfile kv.path" -S = -- "$cur" ) )
__docker_nospace
return
;;
--config-file|--containerd|--init-path|--pidfile|-p|--tlscacert|--tlscert|--tlskey|--userland-proxy-path)
_filedir
return

View File

@ -2731,9 +2731,6 @@ __docker_subcommand() {
"($help -b --bridge)"{-b=,--bridge=}"[Attach containers to a network bridge]:bridge:_net_interfaces" \
"($help)--bip=[Network bridge IP]:IP address: " \
"($help)--cgroup-parent=[Parent cgroup for all containers]:cgroup: " \
"($help)--cluster-advertise=[Address or interface name to advertise]:Instance to advertise (host\:port): " \
"($help)--cluster-store=[URL of the distributed storage backend]:Cluster Store:->cluster-store" \
"($help)*--cluster-store-opt=[Cluster store options]:Cluster options:->cluster-store-options" \
"($help)--config-file=[Path to daemon configuration file]:Config File:_files" \
"($help)--containerd=[Path to containerd socket]:socket:_files -g \"*.sock\"" \
"($help)--containerd-namespace=[Containerd namespace to use]:containerd namespace:" \
@ -2792,22 +2789,6 @@ __docker_subcommand() {
"($help)--validate[Validate daemon configuration and exit]" && ret=0
case $state in
(cluster-store)
if compset -P '*://'; then
_message 'host:port' && ret=0
else
store=('consul' 'etcd' 'zk')
_describe -t cluster-store "Cluster Store" store -qS "://" && ret=0
fi
;;
(cluster-store-options)
if compset -P '*='; then
_files && ret=0
else
opts=('discovery.heartbeat' 'discovery.ttl' 'kv.cacertfile' 'kv.certfile' 'kv.keyfile' 'kv.path')
_describe -t cluster-store-opts "Cluster Store Options" opts -qS "=" && ret=0
fi
;;
(users-groups)
if compset -P '*:'; then
_groups && ret=0

View File

@ -1052,41 +1052,6 @@ Be careful setting `nproc` with the `ulimit` flag as `nproc` is designed by Linu
set the maximum number of processes available to a user, not to a container. For details
please check the [run](run.md) reference.
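For example, a sketch of raising the default `nproc` limit daemon-wide (the `1024:2048` soft/hard values are illustrative):

```console
$ sudo dockerd --default-ulimit nproc=1024:2048
```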
### Node discovery
The `--cluster-advertise` option specifies the `host:port` or `interface:port`
combination that this particular daemon instance should use when advertising
itself to the cluster. The daemon is reached by remote hosts through this value.
If you specify an interface, make sure it includes the IP address of the actual
Docker host. For Engine installations created through `docker-machine`, the
interface is typically `eth1`.
The daemon uses [libkv](https://github.com/docker/libkv/) to advertise
the node within the cluster. Some key-value backends support mutual
TLS. The client TLS settings used by the daemon can be configured
using the `--cluster-store-opt` flag, specifying the paths to PEM encoded
files. For example:
```console
$ sudo dockerd \
--cluster-advertise 192.168.1.2:2376 \
--cluster-store etcd://192.168.1.2:2379 \
--cluster-store-opt kv.cacertfile=/path/to/ca.pem \
--cluster-store-opt kv.certfile=/path/to/cert.pem \
--cluster-store-opt kv.keyfile=/path/to/key.pem
```
The currently supported cluster store options are:
| Option | Description |
|:----------------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `discovery.heartbeat` | Specifies the heartbeat timer in seconds which is used by the daemon as a `keepalive` mechanism to make sure discovery module treats the node as alive in the cluster. If not configured, the default value is 20 seconds. |
| `discovery.ttl` | Specifies the TTL (time-to-live) in seconds which is used by the discovery module to timeout a node if a valid heartbeat is not received within the configured ttl value. If not configured, the default value is 60 seconds. |
| `kv.cacertfile` | Specifies the path to a local file with PEM encoded CA certificates to trust. |
| `kv.certfile` | Specifies the path to a local file with a PEM encoded certificate. This certificate is used as the client cert for communication with the Key/Value store. |
| `kv.keyfile` | Specifies the path to a local file with a PEM encoded private key. This private key is used as the client key for communication with the Key/Value store. |
| `kv.path` | Specifies the path in the Key/Value store. If not configured, the default value is 'docker/nodes'. |
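For example, the discovery options above can be combined in a single invocation (the address, timings, and key path are illustrative):

```console
$ sudo dockerd \
    --cluster-store etcd://192.168.1.2:2379 \
    --cluster-advertise 192.168.1.2:2376 \
    --cluster-store-opt discovery.heartbeat=10 \
    --cluster-store-opt discovery.ttl=30 \
    --cluster-store-opt kv.path=custom/docker/nodes
```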
### Access authorization
Docker's access authorization can be extended by authorization plugins that your
@ -1274,9 +1239,6 @@ This is a full example of the allowed configuration options on Linux:
"bip": "",
"bridge": "",
"cgroup-parent": "",
"cluster-advertise": "",
"cluster-store": "",
"cluster-store-opts": {},
"containerd": "/run/containerd/containerd.sock",
"containerd-namespace": "docker",
"containerd-plugin-namespace": "docker-plugins",
@ -1402,8 +1364,6 @@ This is a full example of the allowed configuration options on Windows:
"allow-nondistributable-artifacts": [],
"authorization-plugins": [],
"bridge": "",
"cluster-advertise": "",
"cluster-store": "",
"containerd": "\\\\.\\pipe\\containerd-containerd",
"containerd-namespace": "docker",
"containerd-plugin-namespace": "docker-plugins",
@ -1471,9 +1431,6 @@ if there are conflicts, but it won't stop execution.
The list of currently supported options that can be reconfigured is this:
- `debug`: it changes the daemon to debug mode when set to true.
- `cluster-store`: it reloads the discovery store with the new address.
- `cluster-store-opts`: it uses the new options to reload the discovery store.
- `cluster-advertise`: it modifies the address advertised after reloading.
- `labels`: it replaces the daemon labels with a new set of labels.
- `live-restore`: Enables [keeping containers alive during daemon downtime](https://docs.docker.com/config/containers/live-restore/).
- `max-concurrent-downloads`: it updates the max concurrent downloads for each pull.
@ -1491,15 +1448,6 @@ The list of currently supported options that can be reconfigured is this:
- `shutdown-timeout`: it replaces the daemon's existing configuration timeout with a new timeout for shutting down all containers.
- `features`: it explicitly enables or disables specific features.
Updating and reloading the cluster configurations such as `--cluster-store`,
`--cluster-advertise` and `--cluster-store-opts` takes effect only if
these configurations were not previously configured. If `--cluster-store`
has been provided in flags and `cluster-advertise` has not, `cluster-advertise`
can be added in the configuration file without being accompanied by `--cluster-store`.
Configuration reload logs a warning message if it detects a change in
previously configured cluster configurations.
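As a sketch, a configuration reload is triggered by sending a `SIGHUP` signal to the daemon process after editing `daemon.json` (Linux; assumes `pidof` is available):

```console
$ sudo kill -SIGHUP $(pidof dockerd)
```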
### Run multiple daemons
> **Note:**

View File

@ -51,34 +51,24 @@ $ docker network create -d bridge my-bridge-network
Bridge networks are isolated networks on a single Engine installation. If you
want to create a network that spans multiple Docker hosts each running an
Engine, you must create an `overlay` network. Unlike `bridge` networks, overlay
networks require some pre-existing conditions before you can create one. These
conditions are:
Engine, you must enable Swarm mode, and create an `overlay` network. To read more
about overlay networks with Swarm mode, see ["*use overlay networks*"](https://docs.docker.com/network/overlay/).
* Access to a key-value store. Engine supports Consul, Etcd, and ZooKeeper (Distributed store) key-value stores.
* A cluster of hosts with connectivity to the key-value store.
* A properly configured Engine `daemon` on each host in the cluster.
The `dockerd` options that support the `overlay` network are:
* `--cluster-store`
* `--cluster-store-opt`
* `--cluster-advertise`
To read more about these options and how to configure them, see ["*Get started
with multi-host network*"](https://docs.docker.com/engine/userguide/networking/get-started-overlay).
While not required, it is a good idea to install Docker Swarm to
manage the cluster that makes up your network. Swarm provides sophisticated
discovery and server management tools that can assist your implementation.
Once you have prepared the `overlay` network prerequisites you simply choose a
Docker host in the cluster and issue the following to create the network:
Once you have enabled swarm mode, you can create a swarm-scoped overlay network:
```console
$ docker network create -d overlay my-multihost-network
$ docker network create --scope=swarm --attachable -d overlay my-multihost-network
```
By default, swarm-scoped networks do not allow manually started containers to
be attached. This restriction is added to prevent someone who has access to
a non-manager node in the swarm cluster from running a container that is able
to access the network stack of a swarm service.
The `--attachable` option used in the example above disables this restriction,
and allows both swarm services and manually started containers to attach to
the overlay network.
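For example, with the attachable network created above, a manually started container can join it (the container name is illustrative):

```console
$ docker run -itd --name standalone-container --network my-multihost-network busybox
```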
Network names must be unique. The Docker daemon attempts to identify naming
conflicts but this is not guaranteed. It is the user's responsibility to avoid
name conflicts.
@ -121,9 +111,9 @@ disconnect` command.
### Specify advanced options
When you create a network, Engine creates a non-overlapping subnetwork for the
network by default. This subnetwork is not a subdivision of an existing
network. It is purely for ip-addressing purposes. You can override this default
and specify subnetwork values directly using the `--subnet` option. On a
network by default. This subnetwork is not a subdivision of an existing network.
It is purely for ip-addressing purposes. You can override this default and
specify subnetwork values directly using the `--subnet` option. On a
`bridge` network you can only create a single subnet:
```console
@ -221,6 +211,43 @@ $ docker network create -d overlay \
my-ingress-network
```
### Run services on predefined networks
You can create services on the predefined docker networks `bridge` and `host`.
```console
$ docker service create --name my-service \
--network host \
--replicas 2 \
busybox top
```
### Swarm networks with local scope drivers
You can create a swarm network with local scope network drivers. You do so
by promoting the network scope to `swarm` during the creation of the network.
You will then be able to use this network when creating services.
```console
$ docker network create -d bridge \
--scope swarm \
--attachable \
swarm-network
```
For network drivers which provide connectivity across hosts (ex. macvlan), if
node specific configurations are needed in order to plumb the network on each
host, you will supply that configuration via a configuration only network.
When you create the swarm scoped network, you will then specify the name of the
network which contains the configuration.
```console
node1$ docker network create --config-only --subnet 192.168.100.0/24 --gateway 192.168.100.115 mv-config
node2$ docker network create --config-only --subnet 192.168.200.0/24 --gateway 192.168.200.202 mv-config
node1$ docker network create -d macvlan --scope swarm --config-from mv-config --attachable swarm-network
```
## Related commands
* [network inspect](network_inspect.md)

View File

@ -12,9 +12,6 @@ dockerd - Enable daemon mode
[**-b**|**--bridge**[=*BRIDGE*]]
[**--bip**[=*BIP*]]
[**--cgroup-parent**[=*[]*]]
[**--cluster-store**[=*[]*]]
[**--cluster-advertise**[=*[]*]]
[**--cluster-store-opt**[=*map[]*]]
[**--config-file**[=*/etc/docker/daemon.json*]]
[**--containerd**[=*SOCKET-PATH*]]
[**--data-root**[=*/var/lib/docker*]]
@ -154,17 +151,6 @@ $ sudo dockerd --add-runtime runc=runc --add-runtime custom=/usr/local/bin/my-ru
Set parent cgroup for all containers. Default is "/docker" for fs cgroup
driver and "system.slice" for systemd cgroup driver.
**--cluster-store**=""
URL of the distributed storage backend
**--cluster-advertise**=""
Specifies the `host:port` or `interface:port` combination that this
particular daemon instance should use when advertising itself to the cluster.
The daemon is reached through this value.
**--cluster-store-opt**=""
Specifies options for the Key/Value store.
**--config-file**="/etc/docker/daemon.json"
Specifies the JSON file path to load the configuration from.
@ -780,29 +766,6 @@ cannot be smaller than **btrfs.min_space**.
Example use: `docker daemon -s btrfs --storage-opt btrfs.min_space=10G`
# CLUSTER STORE OPTIONS
The daemon uses libkv to advertise the node within the cluster. Some Key/Value
backends support mutual TLS, and the client TLS settings used by the daemon can
be configured using the **--cluster-store-opt** flag, specifying the paths to
PEM encoded files.
#### kv.cacertfile
Specifies the path to a local file with PEM encoded CA certificates to trust
#### kv.certfile
Specifies the path to a local file with a PEM encoded certificate. This
certificate is used as the client cert for communication with the Key/Value
store.
#### kv.keyfile
Specifies the path to a local file with a PEM encoded private key. This
private key is used as the client key for communication with the Key/Value
store.
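Example use (illustrative store URL and paths): `dockerd --cluster-store etcd://192.168.1.2:2379 --cluster-store-opt kv.cacertfile=/path/to/ca.pem --cluster-store-opt kv.certfile=/path/to/cert.pem --cluster-store-opt kv.keyfile=/path/to/key.pem`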
# Access authorization
Docker's access authorization can be extended by authorization plugins that

View File

@ -24,8 +24,8 @@ our container needs access to a character device with major `42` and
any number of minor number (added as new devices appear), the
following rule would be added:
```
docker create --device-cgroup-rule='c 42:* rmw' -name my-container my-image
```console
$ docker create --device-cgroup-rule='c 42:* rmw' --name my-container my-image
```
Then, a user could ask `udev` to execute a script that would `docker exec my-container mknod newDevX c 42 <minor>`

View File

@ -14,7 +14,7 @@ You can use the full or shortened container ID or the container name set using
Inspect the changes to an `nginx` container:
```bash
```console
$ docker diff 1fdfd1f54c1b
C /dev

View File

@ -29,7 +29,7 @@ container.
In order to retrieve logs before a specific point in time, run:
```bash
```console
$ docker run --name test -d busybox sh -c "while true; do $(echo date); sleep 1; done"
$ date
Tue 14 Nov 2017 16:40:00 CET

View File

@ -40,7 +40,7 @@ To limit a container's cpu-shares to 512, first identify the container
name or ID. You can use **docker ps** to find these values. You can also
use the ID returned from the **docker run** command. Then, do the following:
```bash
```console
$ docker container update --cpu-shares 512 abebf7571666
```
@ -48,7 +48,7 @@ $ docker container update --cpu-shares 512 abebf7571666
To update multiple resource configurations for multiple containers:
```bash
```console
$ docker container update --cpu-shares 512 -m 300M abebf7571666 hopeful_morse
```
@ -64,19 +64,19 @@ NOTE: The **--kernel-memory** option has been deprecated since Docker 20.10.
For example, if you started a container with this command:
```bash
```console
$ docker run -dit --name test --kernel-memory 50M ubuntu bash
```
You can update kernel memory while the container is running:
```bash
```console
$ docker container update --kernel-memory 80M test
```
If you started a container *without* kernel memory initialized:
```bash
```console
$ docker run -dit --name test2 --memory 300M ubuntu bash
```
@ -95,7 +95,7 @@ container.
To update restart policy for one or more containers:
```bash
```console
$ docker container update --restart=on-failure:3 abebf7571666 hopeful_morse
```

View File

@ -40,7 +40,7 @@ output the data exactly as the template declares or, when using the
The following example uses a template without headers and outputs the
`ID` and `CreatedSince` entries separated by a colon for all images:
```bash
```console
$ docker images --format "{{.ID}}: {{.CreatedSince}} ago"
cc1b61406712: 2 weeks ago

View File

@ -2,15 +2,16 @@ Connects a container to a network. You can connect a container by name
or by ID. Once connected, the container can communicate with other containers in
the same network.
```bash
```console
$ docker network connect multi-host-network container1
```
You can also use the `docker run --network=<network-name>` option to start a container and immediately connect it to a network.
```bash
```console
$ docker run -itd --network=multi-host-network --ip 172.20.88.22 --ip6 2001:db8::8822 busybox
```
You can pause, restart, and stop containers that are connected to a network.
A container connects to its configured networks when it runs.
@ -21,11 +22,9 @@ to specify an `--ip-range` when creating the network, and choose the static IP
address(es) from outside that range. This ensures that the IP address is not
given to another container while this container is not on the network.
```bash
```console
$ docker network create --subnet 172.20.0.0/16 --ip-range 172.20.240.0/20 multi-host-network
```
```bash
$ docker network connect --ip 172.20.128.2 multi-host-network container2
```

View File

@ -5,59 +5,60 @@ network driver you can specify that `DRIVER` here also. If you don't specify the
When you install Docker Engine it creates a `bridge` network automatically. This
network corresponds to the `docker0` bridge that Engine has traditionally relied
on. When you launch a new container with `docker run` it automatically connects to
this bridge network. You cannot remove this default bridge network but you can
this bridge network. You cannot remove this default bridge network, but you can
create new ones using the `network create` command.
```bash
```console
$ docker network create -d bridge my-bridge-network
```
Bridge networks are isolated networks on a single Engine installation. If you
want to create a network that spans multiple Docker hosts each running an
Engine, you must create an `overlay` network. Unlike `bridge` networks overlay
networks require some pre-existing conditions before you can create one. These
conditions are:
Engine, you must enable Swarm mode, and create an `overlay` network. To read more
about overlay networks with Swarm mode, see ["*use overlay networks*"](https://docs.docker.com/network/overlay/).
* Access to a key-value store. Engine supports Consul, Etcd, and Zookeeper (Distributed store) key-value stores.
* A cluster of hosts with connectivity to the key-value store.
* A properly configured Engine `daemon` on each host in the cluster.
Once you have enabled swarm mode, you can create a swarm-scoped overlay network:
The `dockerd` options that support the `overlay` network are:
* `--cluster-store`
* `--cluster-store-opt`
* `--cluster-advertise`
To read more about these options and how to configure them, see ["*Get started
with multi-host
network*"](https://docs.docker.com/engine/userguide/networking/get-started-overlay/).
It is also a good idea, though not required, to install Docker Swarm to
manage the cluster that makes up your network. Swarm provides sophisticated
discovery and server management that can assist your implementation.
Once you have prepared the `overlay` network prerequisites you simply choose a
Docker host in the cluster and issue the following to create the network:
```bash
$ docker network create -d overlay my-multihost-network
```console
$ docker network create --scope=swarm --attachable -d overlay my-multihost-network
```
By default, swarm-scoped networks do not allow manually started containers to
be attached. This restriction is added to prevent someone who has access to
a non-manager node in the swarm cluster from running a container that is able
to access the network stack of a swarm service.
The `--attachable` option used in the example above disables this restriction,
and allows both swarm services and manually started containers to attach to
the overlay network.
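For example, with the attachable network created above, a manually started container can join it (the container name is illustrative):

```console
$ docker run -itd --name standalone-container --network my-multihost-network busybox
```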
Network names must be unique. The Docker daemon attempts to identify naming
conflicts but this is not guaranteed. It is the user's responsibility to avoid
name conflicts.
### Overlay network limitations
You should create overlay networks with `/24` blocks (the default), which limits
you to 256 IP addresses, when you create networks using the default VIP-based
endpoint-mode. This recommendation addresses
[limitations with swarm mode](https://github.com/moby/moby/issues/30820). If you
need more than 256 IP addresses, do not increase the IP block size. You can
either use `dnsrr` endpoint mode with an external load balancer, or use multiple
smaller overlay networks. See
[Configure service discovery](https://docs.docker.com/engine/swarm/networking/#configure-service-discovery)
for more information about different endpoint modes.
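For example, a sketch of opting a service into `dnsrr` endpoint mode instead of the default VIP mode (service name, image, and network are illustrative):

```console
$ docker service create --name my-dnsrr-service --endpoint-mode dnsrr --network my-multihost-network nginx
```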
## Connect containers
When you start a container use the `--network` flag to connect it to a network.
This adds the `busybox` container to the `mynet` network.
When you start a container, use the `--network` flag to connect it to a network.
This example adds the `busybox` container to the `mynet` network:
```bash
```console
$ docker run -itd --network=mynet busybox
```
If you want to add a container to a network after the container is already
running use the `docker network connect` subcommand.
running, use the `docker network connect` subcommand.
You can connect multiple containers to the same network. Once connected, the
containers can communicate using only another container's IP address or name.
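For example, on a user-defined network one container can reach another by name (container names are illustrative):

```console
$ docker exec container1 ping -c 2 container2
```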
@ -68,7 +69,7 @@ Engines can also communicate in this way.
You can disconnect a container from a network using the `docker network
disconnect` command.
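For example (network and container names are illustrative):

```console
$ docker network disconnect mynet my-container
```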
## Specifying advanced options
### Specify advanced options
When you create a network, Engine creates a non-overlapping subnetwork for the
network by default. This subnetwork is not a subdivision of an existing network.
@ -76,14 +77,14 @@ It is purely for ip-addressing purposes. You can override this default and
specify subnetwork values directly using the `--subnet` option. On a
`bridge` network you can only create a single subnet:
```bash
$ docker network create -d bridge --subnet=192.168.0.0/16 br0
```console
$ docker network create --driver=bridge --subnet=192.168.0.0/16 br0
```
Additionally, you can also specify the `--gateway`, `--ip-range`, and `--aux-address`
options.
```bash
```console
$ docker network create \
--driver=bridge \
--subnet=172.28.0.0/16 \
@ -94,23 +95,59 @@ $ docker network create \
If you omit the `--gateway` flag the Engine selects one for you from inside a
preferred pool. For `overlay` networks and for network driver plugins that
support it you can create multiple subnetworks.
support it you can create multiple subnetworks. This example uses two `/25`
subnets to adhere to the current guidance of not having more than 256 IPs in
a single overlay network. Each of the subnetworks has 126 usable addresses.
```bash
```console
$ docker network create -d overlay \
--subnet=192.168.0.0/16 \
--subnet=192.170.0.0/16 \
--gateway=192.168.0.100 \
--gateway=192.170.0.100 \
--ip-range=192.168.1.0/24 \
--aux-address="my-router=192.168.1.5" --aux-address="my-switch=192.168.1.6" \
--aux-address="my-printer=192.170.1.5" --aux-address="my-nas=192.170.1.6" \
--subnet=192.168.10.0/25 \
--subnet=192.168.20.0/25 \
--gateway=192.168.10.100 \
--gateway=192.168.20.100 \
--aux-address="my-router=192.168.10.5" --aux-address="my-switch=192.168.10.6" \
--aux-address="my-printer=192.168.20.5" --aux-address="my-nas=192.168.20.6" \
my-multihost-network
```
Be sure that your subnetworks do not overlap. If they do, the network create
fails and Engine returns an error.
### Bridge driver options
When creating a custom network, the default network driver (i.e. `bridge`) has
additional options that can be passed. The following are those options and the
equivalent docker daemon flags used for docker0 bridge:
| Option | Equivalent | Description |
|--------------------------------------------------|-------------|-------------------------------------------------------|
| `com.docker.network.bridge.name` | - | Bridge name to be used when creating the Linux bridge |
| `com.docker.network.bridge.enable_ip_masquerade` | `--ip-masq` | Enable IP masquerading |
| `com.docker.network.bridge.enable_icc` | `--icc` | Enable or Disable Inter Container Connectivity |
| `com.docker.network.bridge.host_binding_ipv4` | `--ip` | Default IP when binding container ports |
| `com.docker.network.driver.mtu` | `--mtu` | Set the containers network MTU |
| `com.docker.network.container_iface_prefix` | - | Set a custom prefix for container interfaces |
The following arguments can be passed to `docker network create` for any
network driver, again with their approximate equivalents to `docker daemon`.
| Argument | Equivalent | Description |
|--------------|----------------|--------------------------------------------|
| `--gateway` | - | IPv4 or IPv6 Gateway for the master subnet |
| `--ip-range` | `--fixed-cidr` | Allocate IPs from a range |
| `--internal` | - | Restrict external access to the network |
| `--ipv6` | `--ipv6` | Enable IPv6 networking |
| `--subnet` | `--bip` | Subnet for network |
For example, let's use `-o` or `--opt` options to specify an IP address binding
when publishing ports:
```console
$ docker network create \
-o "com.docker.network.bridge.host_binding_ipv4"="172.19.0.1" \
simple-network
```
### Network internal mode
By default, when you connect a container to an `overlay` network, Docker also
@ -126,11 +163,11 @@ one ingress network can be created at a time. The network can be removed only
if no services depend on it. Any option available when creating an overlay network
is also available when creating the ingress network, except for the `--attachable` option.
```bash
```console
$ docker network create -d overlay \
--subnet=10.11.0.0/16 \
--ingress \
--opt com.docker.network.mtu=9216 \
--opt com.docker.network.driver.mtu=9216 \
--opt encrypted=true \
my-ingress-network
```
@ -139,7 +176,7 @@ $ docker network create -d overlay \
You can create services on the predefined docker networks `bridge` and `host`.
```bash
```console
$ docker service create --name my-service \
--network host \
--replicas 2 \
@ -149,10 +186,10 @@ $ docker service create --name my-service \
### Swarm networks with local scope drivers
You can create a swarm network with local scope network drivers. You do so
by promoting the network scope to `swarm` during the creation of the network.
You will then be able to use this network when creating services.
by promoting the network scope to `swarm` during the creation of the network.
You will then be able to use this network when creating services.
```bash
```console
$ docker network create -d bridge \
--scope swarm \
--attachable \
@ -162,16 +199,13 @@ $ docker network create -d bridge \
For network drivers which provide connectivity across hosts (ex. macvlan), if
node specific configurations are needed in order to plumb the network on each
host, you will supply that configuration via a configuration only network.
When you create the swarm scoped network, you will then specify the name of the
When you create the swarm scoped network, you will then specify the name of the
network which contains the configuration.
```bash
```console
node1$ docker network create --config-only --subnet 192.168.100.0/24 --gateway 192.168.100.115 mv-config
node2$ docker network create --config-only --subnet 192.168.200.0/24 --gateway 192.168.200.202 mv-config
node1$ docker network create -d macvlan --scope swarm --config-from mv-config --attachable swarm-network
```

View File

@ -1,5 +1,5 @@
Disconnects a container from a network.
```bash
```console
$ docker network disconnect multi-host-network container1
```

View File

@ -1,6 +1,6 @@
Returns information about one or more networks. By default, this command renders all results in a JSON object. For example, if you connect two containers to the default `bridge` network:
```bash
```console
$ sudo docker run -itd --name=container1 busybox
f2870c98fd504370fb86e59f32cd0753b1ac9b69b7d80566ffc7192a82b3ed27
@ -14,7 +14,7 @@ template for each result. Go's
[text/template](http://golang.org/pkg/text/template/) package
describes all the details of the format.
```bash
```console
$ sudo docker network inspect bridge
[
{
@ -63,7 +63,7 @@ $ sudo docker network inspect bridge
Returns the information about the user-defined network:
```bash
```console
$ docker network create simple-network
69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a
$ docker network inspect simple-network
@ -95,7 +95,7 @@ and the IPs of the nodes where the tasks are running.
The following is example output for an overlay network `ov1` that has one service `s1`
attached to it. Service `s1` in this case has three replicas.
```bash
```console
$ docker network inspect --verbose ov1
[
{

View File

@ -1,18 +1,18 @@
Lists all the networks the Engine `daemon` knows about. This includes the
networks that span across multiple hosts in a cluster, for example:
```bash
$ docker network ls
NETWORK ID NAME DRIVER SCOPE
7fca4eb8c647 bridge bridge local
9f904ee27bf5 none null local
cf03ee007fb4 host host local
78b03ee04fc4 multi-host overlay swarm
```console
$ docker network ls
NETWORK ID NAME DRIVER SCOPE
7fca4eb8c647 bridge bridge local
9f904ee27bf5 none null local
cf03ee007fb4 host host local
78b03ee04fc4 multi-host overlay swarm
```
Use the `--no-trunc` option to display the full network id:
```bash
```console
$ docker network ls --no-trunc
NETWORK ID NAME DRIVER
18a2866682b85619a026c81b98a5e375bd33e1b0936a26cc497c283d27bae9b3 none null
@ -44,7 +44,7 @@ The `driver` filter matches networks based on their driver.
The following example matches networks with the `bridge` driver:
```bash
```console
$ docker network ls --filter driver=bridge
NETWORK ID NAME DRIVER
db9db329f835 test1 bridge
@ -58,7 +58,7 @@ The `id` filter matches on all or part of a network's ID.
The following filter matches all networks with an ID containing the
`63d1ff1f77b0...` string.
```bash
```console
$ docker network ls --filter id=63d1ff1f77b07ca51070a8c227e962238358bd310bde1529cf62e6c307ade161
NETWORK ID NAME DRIVER
63d1ff1f77b0 dev bridge
@ -66,7 +66,7 @@ NETWORK ID NAME DRIVER
You can also filter for a substring in an ID as this shows:
```bash
```console
$ docker network ls --filter id=95e74588f40d
NETWORK ID NAME DRIVER
95e74588f40d foo bridge
@ -83,7 +83,7 @@ value.
The following filter matches networks with the `usage` label regardless of its value.
```bash
```console
$ docker network ls -f "label=usage"
NETWORK ID NAME DRIVER
db9db329f835 test1 bridge
@ -92,7 +92,7 @@ f6e212da9dfd test2 bridge
The following filter matches networks with the `usage` label with the `prod` value.
```bash
```console
$ docker network ls -f "label=usage=prod"
NETWORK ID NAME DRIVER
f6e212da9dfd test2 bridge
@ -104,7 +104,7 @@ The `name` filter matches on all or part of a network's name.
The following filter matches all networks with a name containing the `foobar` string.
```bash
```console
$ docker network ls --filter name=foobar
NETWORK ID NAME DRIVER
06e7eef0a170 foobar bridge
@ -112,7 +112,7 @@ NETWORK ID NAME DRIVER
You can also filter for a substring in a name as this shows:
```bash
```console
$ docker network ls --filter name=foo
NETWORK ID NAME DRIVER
95e74588f40d foo bridge
@ -125,7 +125,7 @@ The `scope` filter matches networks based on their scope.
The following example matches networks with the `swarm` scope:
```bash
```console
$ docker network ls --filter scope=swarm
NETWORK ID NAME DRIVER SCOPE
xbtm0v4f1lfh ingress overlay swarm
@ -134,7 +134,7 @@ ic6r88twuu92 swarmnet overlay swarm
The following example matches networks with the `local` scope:
```bash
```console
$ docker network ls --filter scope=local
NETWORK ID NAME DRIVER SCOPE
e85227439ac7 bridge bridge local
@ -150,7 +150,7 @@ The `type` filter supports two values; `builtin` displays predefined networks
The following filter matches all user defined networks:
```bash
```console
$ docker network ls --filter type=custom
NETWORK ID NAME DRIVER
95e74588f40d foo bridge
@ -160,7 +160,7 @@ NETWORK ID NAME DRIVER
This flag allows for batch cleanup. For example, use this filter
to delete all user-defined networks:
```bash
```console
$ docker network rm `docker network ls --filter type=custom -q`
```

View File

@ -2,16 +2,16 @@ Removes one or more networks by name or identifier. To remove a network,
you must first disconnect any containers connected to it.
To remove the network named 'my-network':
```bash
$ docker network rm my-network
```console
$ docker network rm my-network
```
To delete multiple networks in a single `docker network rm` command, provide
multiple network names or ids. The following example deletes a network with id
`3695c422697f` and a network named `my-network`:
```bash
$ docker network rm 3695c422697f my-network
```console
$ docker network rm 3695c422697f my-network
```
When you specify multiple networks, the command attempts to delete each in turn.